[llvm] 3371849 - AMDGPU: Round out system atomics tests

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 8 11:38:22 PDT 2023


Author: Matt Arsenault
Date: 2023-08-08T14:38:05-04:00
New Revision: 33718491940767de5d03bdb198796560e979196c

URL: https://github.com/llvm/llvm-project/commit/33718491940767de5d03bdb198796560e979196c
DIFF: https://github.com/llvm/llvm-project/commit/33718491940767de5d03bdb198796560e979196c.diff

LOG: AMDGPU: Round out system atomics tests

There were system-scope tests only for the integer min/max operations.
Expand these to cover all of the integer atomicrmw operations.
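
For context, an atomicrmw with no explicit syncscope is system-scoped in
LLVM IR, which is what these tests exercise. A minimal sketch (pointer and
value names are illustrative, not from the tests):

  %v0 = atomicrmw add ptr %p, i32 1 seq_cst                     ; system scope
  %v1 = atomicrmw add ptr %p, i32 1 syncscope("agent") seq_cst  ; narrower scope

At system scope the backend must also emit cache maintenance (e.g. the
buffer_wbinvl1_vol instructions in the checks below) that narrower scopes
such as workgroup can sometimes omit.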

Added: 
    llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll
    llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll
    llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
    llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
    llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
    llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll

Modified: 
    llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
    llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
    llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll

Removed: 
    llvm/test/CodeGen/AMDGPU/flat_atomics_i64_min_max_system.ll
    llvm/test/CodeGen/AMDGPU/flat_atomics_min_max_system.ll
    llvm/test/CodeGen/AMDGPU/global_atomics_i64_min_max_system.ll
    llvm/test/CodeGen/AMDGPU/global_atomics_min_max_system.ll

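One note on the checks that follow: most integer operations map directly to
a flat atomic instruction, but GCN has no native nand atomic, so atomicrmw
nand is expanded into a compare-exchange loop (visible as the .LBB*_1 loops
in the nand tests, and the subject of the AtomicExpand tests listed above).
A simplified sketch of that IR-level expansion, with illustrative value
names:

  entry:
    %init = load i32, ptr %p
    br label %atomicrmw.start

  atomicrmw.start:
    %loaded = phi i32 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
    %tmp = and i32 %loaded, %val
    %new = xor i32 %tmp, -1        ; nand = not(and)
    %pair = cmpxchg ptr %p, i32 %loaded, i32 %new seq_cst seq_cst
    %newloaded = extractvalue { i32, i1 } %pair, 0
    %success = extractvalue { i32, i1 } %pair, 1
    br i1 %success, label %atomicrmw.end, label %atomicrmw.start

  atomicrmw.end:

The offset variants also show a target difference: gfx900 folds the 16-byte
offset into the instruction's immediate offset field (offset:16), while the
earlier targets materialize the address with a 64-bit add/addc first.
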

################################################################################
diff  --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll
new file mode 100644
index 00000000000000..e3f870b2c2731d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll
@@ -0,0 +1,6810 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN1 %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN2 %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN3 %s
+
+; ---------------------------------------------------------------------
+; atomicrmw xchg
+; ---------------------------------------------------------------------
+
+define void @flat_atomic_xchg_i32_noret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_xchg_i32_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i32_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i32_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xchg ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_xchg_i32_noret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_xchg_i32_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i32_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i32_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw xchg ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @flat_atomic_xchg_i32_ret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_xchg_i32_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i32_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i32_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xchg ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @flat_atomic_xchg_i32_ret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_xchg_i32_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i32_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i32_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw xchg ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @flat_atomic_xchg_i32_noret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_xchg_i32_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i32_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i32_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xchg ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_xchg_i32_noret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_xchg_i32_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i32_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i32_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw xchg ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @flat_atomic_xchg_i32_ret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_xchg_i32_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i32_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i32_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xchg ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @flat_atomic_xchg_i32_ret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_xchg_i32_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i32_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i32_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw xchg ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw add
+; ---------------------------------------------------------------------
+
+define void @flat_atomic_add_i32_noret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_add_i32_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i32_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i32_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw add ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_add_i32_noret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_add_i32_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i32_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i32_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw add ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @flat_atomic_add_i32_ret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_add_i32_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i32_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i32_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw add ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @flat_atomic_add_i32_ret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_add_i32_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i32_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i32_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw add ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @flat_atomic_add_i32_noret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_add_i32_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i32_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i32_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw add ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_add_i32_noret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_add_i32_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i32_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i32_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw add ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @flat_atomic_add_i32_ret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_add_i32_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i32_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i32_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw add ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @flat_atomic_add_i32_ret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_add_i32_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i32_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i32_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw add ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw sub
+; ---------------------------------------------------------------------
+
+define void @flat_atomic_sub_i32_noret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_sub_i32_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i32_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i32_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw sub ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_sub_i32_noret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_sub_i32_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i32_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i32_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw sub ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @flat_atomic_sub_i32_ret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_sub_i32_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i32_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i32_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw sub ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @flat_atomic_sub_i32_ret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_sub_i32_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i32_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i32_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw sub ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @flat_atomic_sub_i32_noret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_sub_i32_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i32_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i32_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw sub ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_sub_i32_noret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_sub_i32_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i32_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i32_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw sub ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @flat_atomic_sub_i32_ret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_sub_i32_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i32_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i32_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw sub ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @flat_atomic_sub_i32_ret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_sub_i32_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i32_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i32_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw sub ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw and
+; ---------------------------------------------------------------------
+
+define void @flat_atomic_and_i32_noret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_and_i32_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i32_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i32_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw and ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_and_i32_noret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_and_i32_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i32_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i32_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw and ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @flat_atomic_and_i32_ret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_and_i32_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i32_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i32_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw and ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @flat_atomic_and_i32_ret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_and_i32_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i32_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i32_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw and ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @flat_atomic_and_i32_noret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_and_i32_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i32_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i32_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw and ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_and_i32_noret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_and_i32_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i32_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i32_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw and ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @flat_atomic_and_i32_ret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_and_i32_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i32_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i32_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw and ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @flat_atomic_and_i32_ret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_and_i32_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i32_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i32_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw and ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw nand
+; ---------------------------------------------------------------------
+
+define void @flat_atomic_nand_i32_noret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_nand_i32_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB32_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_and_b32_e32 v3, v4, v2
+; GCN1-NEXT:    v_not_b32_e32 v3, v3
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB32_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i32_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB32_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_and_b32_e32 v3, v4, v2
+; GCN2-NEXT:    v_not_b32_e32 v3, v3
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB32_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i32_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v4, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB32_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_and_b32_e32 v3, v4, v2
+; GCN3-NEXT:    v_not_b32_e32 v3, v3
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB32_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw nand ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_nand_i32_noret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_nand_i32_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB33_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_and_b32_e32 v3, v4, v2
+; GCN1-NEXT:    v_not_b32_e32 v3, v3
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB33_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i32_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB33_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_and_b32_e32 v3, v4, v2
+; GCN2-NEXT:    v_not_b32_e32 v3, v3
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB33_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i32_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v4, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB33_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_and_b32_e32 v3, v4, v2
+; GCN3-NEXT:    v_not_b32_e32 v3, v3
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB33_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw nand ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @flat_atomic_nand_i32_ret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_nand_i32_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_load_dword v3, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB34_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    v_and_b32_e32 v3, v4, v2
+; GCN1-NEXT:    v_not_b32_e32 v3, v3
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB34_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v0, v3
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i32_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_load_dword v3, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB34_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    v_and_b32_e32 v3, v4, v2
+; GCN2-NEXT:    v_not_b32_e32 v3, v3
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB34_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v0, v3
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i32_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v3, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB34_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    v_and_b32_e32 v3, v4, v2
+; GCN3-NEXT:    v_not_b32_e32 v3, v3
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB34_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v3
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw nand ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @flat_atomic_nand_i32_ret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_nand_i32_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v3, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v0, v[3:4]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB35_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_and_b32_e32 v0, v1, v2
+; GCN1-NEXT:    v_not_b32_e32 v0, v0
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB35_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i32_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v3, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v0, v[3:4]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB35_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_and_b32_e32 v0, v1, v2
+; GCN2-NEXT:    v_not_b32_e32 v0, v0
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB35_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i32_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v3, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB35_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    v_and_b32_e32 v3, v4, v2
+; GCN3-NEXT:    v_not_b32_e32 v3, v3
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB35_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v3
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw nand ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @flat_atomic_nand_i32_noret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_nand_i32_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB36_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_and_b32_e32 v0, s6, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    v_not_b32_e32 v0, v0
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB36_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i32_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB36_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_and_b32_e32 v0, s6, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    v_not_b32_e32 v0, v0
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB36_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i32_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v1, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB36_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_and_b32_e32 v0, s6, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    v_not_b32_e32 v0, v0
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB36_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw nand ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_nand_i32_noret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_nand_i32_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB37_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_and_b32_e32 v0, s6, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    v_not_b32_e32 v0, v0
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB37_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i32_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB37_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_and_b32_e32 v0, s6, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    v_not_b32_e32 v0, v0
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB37_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i32_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB37_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_and_b32_e32 v0, s6, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    v_not_b32_e32 v0, v0
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB37_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw nand ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @flat_atomic_nand_i32_ret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_nand_i32_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB38_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_and_b32_e32 v0, s6, v1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    v_not_b32_e32 v0, v0
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB38_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i32_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB38_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_and_b32_e32 v0, s6, v1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    v_not_b32_e32 v0, v0
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB38_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i32_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v0, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB38_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_and_b32_e32 v0, s6, v1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    v_not_b32_e32 v0, v0
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB38_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw nand ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @flat_atomic_nand_i32_ret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_nand_i32_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB39_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_and_b32_e32 v0, s6, v1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    v_not_b32_e32 v0, v0
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB39_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i32_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB39_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_and_b32_e32 v0, s6, v1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    v_not_b32_e32 v0, v0
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB39_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i32_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB39_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_and_b32_e32 v0, s6, v1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    v_not_b32_e32 v0, v0
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB39_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw nand ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
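+; nand has no native atomic instruction, so AtomicExpand rewrites it into the
+; cmpxchg retry loop checked above. A minimal IR sketch of that expansion
+; (function and value names are illustrative, not from the generated checks):
+;   define i32 @nand_expansion_sketch(ptr %p, i32 %in) {
+;   entry:
+;     %init = load i32, ptr %p, align 4
+;     br label %atomicrmw.start
+;   atomicrmw.start:
+;     %loaded = phi i32 [ %init, %entry ], [ %observed, %atomicrmw.start ]
+;     %and = and i32 %loaded, %in
+;     %new = xor i32 %and, -1                        ; nand = not(and)
+;     %pair = cmpxchg ptr %p, i32 %loaded, i32 %new seq_cst seq_cst
+;     %observed = extractvalue { i32, i1 } %pair, 0
+;     %success = extractvalue { i32, i1 } %pair, 1
+;     br i1 %success, label %atomicrmw.end, label %atomicrmw.start
+;   atomicrmw.end:
+;     ret i32 %observed
+;   }
+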
+; ---------------------------------------------------------------------
+; atomicrmw or
+; ---------------------------------------------------------------------
+
+define void @flat_atomic_or_i32_noret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_or_i32_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i32_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i32_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw or ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_or_i32_noret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_or_i32_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i32_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i32_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw or ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @flat_atomic_or_i32_ret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_or_i32_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i32_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i32_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw or ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @flat_atomic_or_i32_ret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_or_i32_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i32_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i32_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw or ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @flat_atomic_or_i32_noret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_or_i32_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i32_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i32_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw or ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_or_i32_noret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_or_i32_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i32_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i32_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw or ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @flat_atomic_or_i32_ret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_or_i32_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i32_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i32_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw or ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @flat_atomic_or_i32_ret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_or_i32_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i32_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i32_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw or ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
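+; Unlike nand, bitwise or has a native flat atomic, so no loop is needed: per
+; the checks above, the result-less tests select flat_atomic_or and the
+; value-returning tests add the glc modifier. A sketch of how result use
+; drives that choice (hypothetical function, not from the generated checks):
+;   define i32 @or_glc_sketch(ptr %p, i32 %v) {
+;     %unused = atomicrmw or ptr %p, i32 %v seq_cst  ; -> flat_atomic_or v[0:1], v2
+;     %old = atomicrmw or ptr %p, i32 %v seq_cst     ; -> flat_atomic_or v0, v[0:1], v2 glc
+;     ret i32 %old
+;   }
+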
+; ---------------------------------------------------------------------
+; atomicrmw xor
+; ---------------------------------------------------------------------
+
+define void @flat_atomic_xor_i32_noret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_xor_i32_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i32_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i32_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xor ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_xor_i32_noret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_xor_i32_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i32_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i32_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw xor ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @flat_atomic_xor_i32_ret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_xor_i32_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i32_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i32_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xor ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @flat_atomic_xor_i32_ret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_xor_i32_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i32_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i32_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw xor ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @flat_atomic_xor_i32_noret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_xor_i32_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i32_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i32_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xor ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_xor_i32_noret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_xor_i32_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i32_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i32_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw xor ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @flat_atomic_xor_i32_ret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_xor_i32_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i32_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i32_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xor ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @flat_atomic_xor_i32_ret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_xor_i32_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i32_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i32_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw xor ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw max
+; ---------------------------------------------------------------------
+
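+; The max tests below follow the same compare-and-swap retry shape as nand;
+; only the value computation in the loop body changes. A sketch of the update
+; step (value names hypothetical):
+;   %cmp = icmp sgt i32 %loaded, %in
+;   %new = select i1 %cmp, i32 %loaded, i32 %in
+; which the backend selects to the v_max_i32_e32 seen inside the loops below.
+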
+define void @flat_atomic_max_i32_noret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_max_i32_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB56_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_max_i32_e32 v3, v4, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB56_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i32_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB56_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_max_i32_e32 v3, v4, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB56_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i32_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v4, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB56_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_max_i32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB56_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw max ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_max_i32_noret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_max_i32_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB57_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_max_i32_e32 v3, v4, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB57_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i32_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB57_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_max_i32_e32 v3, v4, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB57_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i32_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v4, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB57_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_max_i32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB57_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw max ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @flat_atomic_max_i32_ret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_max_i32_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_load_dword v3, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB58_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    v_max_i32_e32 v3, v4, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB58_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v0, v3
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i32_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_load_dword v3, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB58_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    v_max_i32_e32 v3, v4, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB58_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v0, v3
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i32_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v3, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB58_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    v_max_i32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB58_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v3
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw max ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @flat_atomic_max_i32_ret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_max_i32_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v3, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v0, v[3:4]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB59_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_max_i32_e32 v0, v1, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB59_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i32_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v3, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v0, v[3:4]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB59_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_max_i32_e32 v0, v1, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB59_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i32_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v3, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB59_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    v_max_i32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB59_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v3
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw max ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @flat_atomic_max_i32_noret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_max_i32_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB60_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_max_i32_e32 v0, s6, v1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB60_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i32_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB60_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_max_i32_e32 v0, s6, v1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB60_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i32_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v1, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB60_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_max_i32_e32 v0, s6, v1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB60_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw max ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_max_i32_noret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_max_i32_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB61_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_max_i32_e32 v0, s6, v1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB61_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i32_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB61_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_max_i32_e32 v0, s6, v1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB61_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i32_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB61_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_max_i32_e32 v0, s6, v1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB61_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw max ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @flat_atomic_max_i32_ret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_max_i32_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB62_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    v_max_i32_e32 v0, s6, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB62_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i32_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB62_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    v_max_i32_e32 v0, s6, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB62_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i32_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v0, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB62_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    v_max_i32_e32 v0, s6, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB62_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw max ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @flat_atomic_max_i32_ret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_max_i32_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB63_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    v_max_i32_e32 v0, s6, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB63_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i32_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB63_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    v_max_i32_e32 v0, s6, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB63_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i32_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB63_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    v_max_i32_e32 v0, s6, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB63_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw max ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
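+; In the addr64 kernels below, %index is sign-extended (s_ashr_i32 of the
+; sign bit into the high half) and scaled by 4 bytes via s_lshl_b64 ..., 2
+; to form the element address; the constant 4-element (16-byte) offset from
+; the second GEP is then added with s_add_u32/s_addc_u32 on GCN1/GCN2,
+; while gfx900 folds it into the flat instruction's offset:16 immediate.
+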
+define amdgpu_kernel void @atomic_max_i32_addr64_offset(ptr %out, i32 %in, i32 %index) {
+; GCN1-LABEL: atomic_max_i32_addr64_offset:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_ashr_i32 s5, s3, 31
+; GCN1-NEXT:    s_mov_b32 s4, s3
+; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN1-NEXT:    s_add_u32 s0, s0, s4
+; GCN1-NEXT:    s_addc_u32 s1, s1, s5
+; GCN1-NEXT:    s_add_u32 s0, s0, 16
+; GCN1-NEXT:    s_addc_u32 s1, s1, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB64_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_max_i32_e32 v0, s2, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s0
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB64_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_addr64_offset:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_ashr_i32 s5, s3, 31
+; GCN2-NEXT:    s_mov_b32 s4, s3
+; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN2-NEXT:    s_add_u32 s0, s0, s4
+; GCN2-NEXT:    s_addc_u32 s1, s1, s5
+; GCN2-NEXT:    s_add_u32 s0, s0, 16
+; GCN2-NEXT:    s_addc_u32 s1, s1, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB64_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_max_i32_e32 v0, s2, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s0
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB64_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_addr64_offset:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_ashr_i32 s5, s3, 31
+; GCN3-NEXT:    s_mov_b32 s4, s3
+; GCN3-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN3-NEXT:    s_add_u32 s0, s0, s4
+; GCN3-NEXT:    s_addc_u32 s1, s1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB64_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_max_i32_e32 v0, s2, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s0
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB64_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr %out, i32 %index
+  %gep = getelementptr i32, ptr %ptr, i32 4
+  %tmp0 = atomicrmw max ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(ptr %out, ptr %out2, i32 %in, i32 %index) {
+; GCN1-LABEL: atomic_max_i32_ret_addr64_offset:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_ashr_i32 s7, s5, 31
+; GCN1-NEXT:    s_mov_b32 s6, s5
+; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; GCN1-NEXT:    s_add_u32 s0, s0, s6
+; GCN1-NEXT:    s_addc_u32 s1, s1, s7
+; GCN1-NEXT:    s_add_u32 s0, s0, 16
+; GCN1-NEXT:    s_addc_u32 s1, s1, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[6:7], 0
+; GCN1-NEXT:  .LBB65_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v3, s1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s0
+; GCN1-NEXT:    v_max_i32_e32 v0, s4, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    s_cbranch_execnz .LBB65_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v1, s2
+; GCN1-NEXT:    v_mov_b32_e32 v2, s3
+; GCN1-NEXT:    flat_store_dword v[1:2], v0
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_ret_addr64_offset:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_ashr_i32 s7, s5, 31
+; GCN2-NEXT:    s_mov_b32 s6, s5
+; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; GCN2-NEXT:    s_add_u32 s0, s0, s6
+; GCN2-NEXT:    s_addc_u32 s1, s1, s7
+; GCN2-NEXT:    s_add_u32 s0, s0, 16
+; GCN2-NEXT:    s_addc_u32 s1, s1, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[6:7], 0
+; GCN2-NEXT:  .LBB65_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v3, s1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s0
+; GCN2-NEXT:    v_max_i32_e32 v0, s4, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    s_cbranch_execnz .LBB65_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v1, s2
+; GCN2-NEXT:    v_mov_b32_e32 v2, s3
+; GCN2-NEXT:    flat_store_dword v[1:2], v0
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_ret_addr64_offset:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_ashr_i32 s1, s3, 31
+; GCN3-NEXT:    s_mov_b32 s0, s3
+; GCN3-NEXT:    s_lshl_b64 s[0:1], s[0:1], 2
+; GCN3-NEXT:    s_add_u32 s0, s4, s0
+; GCN3-NEXT:    s_addc_u32 s1, s5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB65_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v3, s1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s0
+; GCN3-NEXT:    v_max_i32_e32 v0, s2, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB65_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v1, s6
+; GCN3-NEXT:    v_mov_b32_e32 v2, s7
+; GCN3-NEXT:    flat_store_dword v[1:2], v0
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr %out, i32 %index
+  %gep = getelementptr i32, ptr %ptr, i32 4
+  %tmp0 = atomicrmw max ptr %gep, i32 %in seq_cst
+  store i32 %tmp0, ptr %out2
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_addr64(ptr %out, i32 %in, i32 %index) {
+; GCN1-LABEL: atomic_max_i32_addr64:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_ashr_i32 s5, s3, 31
+; GCN1-NEXT:    s_mov_b32 s4, s3
+; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN1-NEXT:    s_add_u32 s0, s0, s4
+; GCN1-NEXT:    s_addc_u32 s1, s1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB66_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_max_i32_e32 v0, s2, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s0
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB66_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_addr64:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_ashr_i32 s5, s3, 31
+; GCN2-NEXT:    s_mov_b32 s4, s3
+; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN2-NEXT:    s_add_u32 s0, s0, s4
+; GCN2-NEXT:    s_addc_u32 s1, s1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB66_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_max_i32_e32 v0, s2, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s0
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB66_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_addr64:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_ashr_i32 s5, s3, 31
+; GCN3-NEXT:    s_mov_b32 s4, s3
+; GCN3-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN3-NEXT:    s_add_u32 s0, s0, s4
+; GCN3-NEXT:    s_addc_u32 s1, s1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dword v1, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB66_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_max_i32_e32 v0, s2, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s0
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB66_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr %out, i32 %index
+  %tmp0 = atomicrmw max ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_addr64(ptr %out, ptr %out2, i32 %in, i32 %index) {
+; GCN1-LABEL: atomic_max_i32_ret_addr64:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_ashr_i32 s7, s5, 31
+; GCN1-NEXT:    s_mov_b32 s6, s5
+; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; GCN1-NEXT:    s_add_u32 s0, s0, s6
+; GCN1-NEXT:    s_addc_u32 s1, s1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[6:7], 0
+; GCN1-NEXT:  .LBB67_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v3, s1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s0
+; GCN1-NEXT:    v_max_i32_e32 v0, s4, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    s_cbranch_execnz .LBB67_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v1, s2
+; GCN1-NEXT:    v_mov_b32_e32 v2, s3
+; GCN1-NEXT:    flat_store_dword v[1:2], v0
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_ret_addr64:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_ashr_i32 s7, s5, 31
+; GCN2-NEXT:    s_mov_b32 s6, s5
+; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; GCN2-NEXT:    s_add_u32 s0, s0, s6
+; GCN2-NEXT:    s_addc_u32 s1, s1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[6:7], 0
+; GCN2-NEXT:  .LBB67_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v3, s1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s0
+; GCN2-NEXT:    v_max_i32_e32 v0, s4, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    s_cbranch_execnz .LBB67_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v1, s2
+; GCN2-NEXT:    v_mov_b32_e32 v2, s3
+; GCN2-NEXT:    flat_store_dword v[1:2], v0
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_ret_addr64:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_ashr_i32 s1, s3, 31
+; GCN3-NEXT:    s_mov_b32 s0, s3
+; GCN3-NEXT:    s_lshl_b64 s[0:1], s[0:1], 2
+; GCN3-NEXT:    s_add_u32 s0, s4, s0
+; GCN3-NEXT:    s_addc_u32 s1, s5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dword v0, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB67_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v3, s1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s0
+; GCN3-NEXT:    v_max_i32_e32 v0, s2, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB67_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v1, s6
+; GCN3-NEXT:    v_mov_b32_e32 v2, s7
+; GCN3-NEXT:    flat_store_dword v[1:2], v0
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr %out, i32 %index
+  %tmp0 = atomicrmw max ptr %ptr, i32 %in seq_cst
+  store i32 %tmp0, ptr %out2
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw umax
+; ---------------------------------------------------------------------
+
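+; The unsigned tests follow the same compare-exchange loop shape as the
+; signed tests above; the only change in the generated code is the selected
+; maximum, v_max_u32_e32 in place of v_max_i32_e32 (icmp ugt rather than
+; icmp sgt in the expanded IR).
+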
+define void @flat_atomic_umax_i32_noret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_umax_i32_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB68_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_max_u32_e32 v3, v4, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB68_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i32_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB68_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_max_u32_e32 v3, v4, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB68_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i32_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v4, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB68_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_max_u32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB68_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umax ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
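+; Offset handling in the tests below differs by generation: CI and VI flat
+; instructions take no immediate offset, so GCN1/GCN2 materialize base+16
+; with a 64-bit VALU add (v_add_i32/v_add_u32 plus v_addc_u32 carrying
+; through vcc), whereas gfx900 (GCN3) applies the offset:16 field directly.
+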
+define void @flat_atomic_umax_i32_noret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_umax_i32_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB69_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_max_u32_e32 v3, v4, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB69_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i32_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB69_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_max_u32_e32 v3, v4, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB69_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i32_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v4, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB69_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_max_u32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB69_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw umax ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @flat_atomic_umax_i32_ret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_umax_i32_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_load_dword v3, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB70_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    v_max_u32_e32 v3, v4, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB70_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v0, v3
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i32_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_load_dword v3, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB70_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    v_max_u32_e32 v3, v4, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB70_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v0, v3
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i32_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v3, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB70_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    v_max_u32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB70_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v3
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umax ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @flat_atomic_umax_i32_ret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_umax_i32_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v3, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v0, v[3:4]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB71_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_max_u32_e32 v0, v1, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB71_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i32_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v3, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v0, v[3:4]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB71_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_max_u32_e32 v0, v1, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB71_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i32_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v3, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB71_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    v_max_u32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB71_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v3
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw umax ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @flat_atomic_umax_i32_noret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_umax_i32_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB72_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_max_u32_e32 v0, s6, v1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB72_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i32_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB72_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_max_u32_e32 v0, s6, v1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB72_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i32_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v1, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB72_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_max_u32_e32 v0, s6, v1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB72_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umax ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_umax_i32_noret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_umax_i32_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB73_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_max_u32_e32 v0, s6, v1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB73_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i32_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB73_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_max_u32_e32 v0, s6, v1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB73_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i32_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB73_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_max_u32_e32 v0, s6, v1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB73_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw umax ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @flat_atomic_umax_i32_ret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_umax_i32_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB74_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    v_max_u32_e32 v0, s6, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB74_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i32_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB74_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    v_max_u32_e32 v0, s6, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB74_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i32_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v0, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB74_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    v_max_u32_e32 v0, s6, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB74_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umax ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @flat_atomic_umax_i32_ret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_umax_i32_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB75_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    v_max_u32_e32 v0, s6, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB75_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i32_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB75_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    v_max_u32_e32 v0, s6, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB75_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i32_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB75_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    v_max_u32_e32 v0, s6, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB75_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw umax ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_kernel void @atomic_umax_i32_addr64_offset(ptr %out, i32 %in, i32 %index) {
+; GCN1-LABEL: atomic_umax_i32_addr64_offset:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_ashr_i32 s5, s3, 31
+; GCN1-NEXT:    s_mov_b32 s4, s3
+; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN1-NEXT:    s_add_u32 s0, s0, s4
+; GCN1-NEXT:    s_addc_u32 s1, s1, s5
+; GCN1-NEXT:    s_add_u32 s0, s0, 16
+; GCN1-NEXT:    s_addc_u32 s1, s1, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB76_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_max_u32_e32 v0, s2, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s0
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB76_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_addr64_offset:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_ashr_i32 s5, s3, 31
+; GCN2-NEXT:    s_mov_b32 s4, s3
+; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN2-NEXT:    s_add_u32 s0, s0, s4
+; GCN2-NEXT:    s_addc_u32 s1, s1, s5
+; GCN2-NEXT:    s_add_u32 s0, s0, 16
+; GCN2-NEXT:    s_addc_u32 s1, s1, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB76_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_max_u32_e32 v0, s2, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s0
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB76_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32_addr64_offset:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_ashr_i32 s5, s3, 31
+; GCN3-NEXT:    s_mov_b32 s4, s3
+; GCN3-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN3-NEXT:    s_add_u32 s0, s0, s4
+; GCN3-NEXT:    s_addc_u32 s1, s1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB76_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_max_u32_e32 v0, s2, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s0
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB76_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr %out, i32 %index
+  %gep = getelementptr i32, ptr %ptr, i32 4
+  %tmp0 = atomicrmw umax ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(ptr %out, ptr %out2, i32 %in, i32 %index) {
+; GCN1-LABEL: atomic_umax_i32_ret_addr64_offset:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_ashr_i32 s7, s5, 31
+; GCN1-NEXT:    s_mov_b32 s6, s5
+; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; GCN1-NEXT:    s_add_u32 s0, s0, s6
+; GCN1-NEXT:    s_addc_u32 s1, s1, s7
+; GCN1-NEXT:    s_add_u32 s0, s0, 16
+; GCN1-NEXT:    s_addc_u32 s1, s1, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[6:7], 0
+; GCN1-NEXT:  .LBB77_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v3, s1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s0
+; GCN1-NEXT:    v_max_u32_e32 v0, s4, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    s_cbranch_execnz .LBB77_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v1, s2
+; GCN1-NEXT:    v_mov_b32_e32 v2, s3
+; GCN1-NEXT:    flat_store_dword v[1:2], v0
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_ret_addr64_offset:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_ashr_i32 s7, s5, 31
+; GCN2-NEXT:    s_mov_b32 s6, s5
+; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; GCN2-NEXT:    s_add_u32 s0, s0, s6
+; GCN2-NEXT:    s_addc_u32 s1, s1, s7
+; GCN2-NEXT:    s_add_u32 s0, s0, 16
+; GCN2-NEXT:    s_addc_u32 s1, s1, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[6:7], 0
+; GCN2-NEXT:  .LBB77_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v3, s1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s0
+; GCN2-NEXT:    v_max_u32_e32 v0, s4, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    s_cbranch_execnz .LBB77_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v1, s2
+; GCN2-NEXT:    v_mov_b32_e32 v2, s3
+; GCN2-NEXT:    flat_store_dword v[1:2], v0
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32_ret_addr64_offset:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_ashr_i32 s1, s3, 31
+; GCN3-NEXT:    s_mov_b32 s0, s3
+; GCN3-NEXT:    s_lshl_b64 s[0:1], s[0:1], 2
+; GCN3-NEXT:    s_add_u32 s0, s4, s0
+; GCN3-NEXT:    s_addc_u32 s1, s5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB77_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v3, s1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s0
+; GCN3-NEXT:    v_max_u32_e32 v0, s2, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB77_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v1, s6
+; GCN3-NEXT:    v_mov_b32_e32 v2, s7
+; GCN3-NEXT:    flat_store_dword v[1:2], v0
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr %out, i32 %index
+  %gep = getelementptr i32, ptr %ptr, i32 4
+  %tmp0 = atomicrmw umax ptr %gep, i32 %in seq_cst
+  store i32 %tmp0, ptr %out2
+  ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64(ptr %out, ptr %out2, i32 %in, i32 %index) {
+; GCN1-LABEL: atomic_umax_i32_ret_addr64:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_ashr_i32 s7, s5, 31
+; GCN1-NEXT:    s_mov_b32 s6, s5
+; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; GCN1-NEXT:    s_add_u32 s0, s0, s6
+; GCN1-NEXT:    s_addc_u32 s1, s1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[6:7], 0
+; GCN1-NEXT:  .LBB78_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v3, s1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s0
+; GCN1-NEXT:    v_max_u32_e32 v0, s4, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    s_cbranch_execnz .LBB78_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v1, s2
+; GCN1-NEXT:    v_mov_b32_e32 v2, s3
+; GCN1-NEXT:    flat_store_dword v[1:2], v0
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_ret_addr64:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_ashr_i32 s7, s5, 31
+; GCN2-NEXT:    s_mov_b32 s6, s5
+; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; GCN2-NEXT:    s_add_u32 s0, s0, s6
+; GCN2-NEXT:    s_addc_u32 s1, s1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[6:7], 0
+; GCN2-NEXT:  .LBB78_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v3, s1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s0
+; GCN2-NEXT:    v_max_u32_e32 v0, s4, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    s_cbranch_execnz .LBB78_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v1, s2
+; GCN2-NEXT:    v_mov_b32_e32 v2, s3
+; GCN2-NEXT:    flat_store_dword v[1:2], v0
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32_ret_addr64:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_ashr_i32 s1, s3, 31
+; GCN3-NEXT:    s_mov_b32 s0, s3
+; GCN3-NEXT:    s_lshl_b64 s[0:1], s[0:1], 2
+; GCN3-NEXT:    s_add_u32 s0, s4, s0
+; GCN3-NEXT:    s_addc_u32 s1, s5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dword v0, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB78_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v3, s1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s0
+; GCN3-NEXT:    v_max_u32_e32 v0, s2, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB78_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v1, s6
+; GCN3-NEXT:    v_mov_b32_e32 v2, s7
+; GCN3-NEXT:    flat_store_dword v[1:2], v0
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr %out, i32 %index
+  %tmp0 = atomicrmw umax ptr %ptr, i32 %in seq_cst
+  store i32 %tmp0, ptr %out2
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw umin
+; ---------------------------------------------------------------------
+
+define void @flat_atomic_umin_i32_noret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_umin_i32_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB79_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_min_u32_e32 v3, v4, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB79_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i32_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB79_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_min_u32_e32 v3, v4, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB79_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i32_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v4, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB79_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_min_u32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB79_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umin ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_umin_i32_noret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_umin_i32_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB80_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_min_u32_e32 v3, v4, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB80_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i32_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB80_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_min_u32_e32 v3, v4, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB80_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i32_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v4, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB80_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_min_u32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB80_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw umin ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @flat_atomic_umin_i32_ret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_umin_i32_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_load_dword v3, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB81_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    v_min_u32_e32 v3, v4, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB81_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v0, v3
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i32_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_load_dword v3, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB81_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    v_min_u32_e32 v3, v4, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB81_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v0, v3
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i32_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v3, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB81_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    v_min_u32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB81_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v3
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umin ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @flat_atomic_umin_i32_ret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_umin_i32_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v3, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v0, v[3:4]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB82_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_min_u32_e32 v0, v1, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB82_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i32_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v3, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v0, v[3:4]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB82_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_min_u32_e32 v0, v1, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB82_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i32_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v3, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB82_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    v_min_u32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB82_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v3
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw umin ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @flat_atomic_umin_i32_noret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_umin_i32_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB83_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_min_u32_e32 v0, s6, v1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB83_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i32_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB83_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_min_u32_e32 v0, s6, v1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB83_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i32_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v1, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB83_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_min_u32_e32 v0, s6, v1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB83_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umin ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_umin_i32_noret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_umin_i32_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB84_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_min_u32_e32 v0, s6, v1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB84_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i32_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB84_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_min_u32_e32 v0, s6, v1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB84_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i32_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB84_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_min_u32_e32 v0, s6, v1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB84_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw umin ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @flat_atomic_umin_i32_ret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_umin_i32_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB85_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    v_min_u32_e32 v0, s6, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB85_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i32_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB85_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    v_min_u32_e32 v0, s6, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB85_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i32_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v0, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB85_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    v_min_u32_e32 v0, s6, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB85_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umin ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @flat_atomic_umin_i32_ret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_umin_i32_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB86_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    v_min_u32_e32 v0, s6, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB86_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i32_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB86_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    v_min_u32_e32 v0, s6, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB86_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i32_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB86_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    v_min_u32_e32 v0, s6, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB86_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw umin ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw min
+; ---------------------------------------------------------------------
+
+define void @flat_atomic_min_i32_noret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_min_i32_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB87_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_min_i32_e32 v3, v4, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB87_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i32_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB87_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_min_i32_e32 v3, v4, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB87_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i32_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v4, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB87_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_min_i32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB87_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw min ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_min_i32_noret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_min_i32_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB88_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_min_i32_e32 v3, v4, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB88_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i32_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB88_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_min_i32_e32 v3, v4, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB88_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i32_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v4, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB88_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_min_i32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB88_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw min ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @flat_atomic_min_i32_ret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_min_i32_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_load_dword v3, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB89_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v4, v3
+; GCN1-NEXT:    v_min_i32_e32 v3, v4, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB89_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v0, v3
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i32_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_load_dword v3, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB89_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v4, v3
+; GCN2-NEXT:    v_min_i32_e32 v3, v4, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB89_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v0, v3
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i32_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v3, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB89_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    v_min_i32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB89_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v3
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw min ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @flat_atomic_min_i32_ret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_min_i32_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v3, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v0, v[3:4]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB90_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_min_i32_e32 v0, v1, v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB90_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i32_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v3, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v0, v[3:4]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB90_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_min_i32_e32 v0, v1, v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB90_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i32_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dword v3, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB90_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v4, v3
+; GCN3-NEXT:    v_min_i32_e32 v3, v4, v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB90_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v3
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw min ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @flat_atomic_min_i32_noret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_min_i32_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB91_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_min_i32_e32 v0, s6, v1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB91_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i32_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB91_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_min_i32_e32 v0, s6, v1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB91_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i32_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v1, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB91_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_min_i32_e32 v0, s6, v1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB91_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw min ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_min_i32_noret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_min_i32_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB92_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_min_i32_e32 v0, s6, v1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB92_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i32_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB92_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_min_i32_e32 v0, s6, v1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB92_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i32_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB92_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_min_i32_e32 v0, s6, v1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB92_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw min ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @flat_atomic_min_i32_ret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_min_i32_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB93_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    v_min_i32_e32 v0, s6, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB93_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i32_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB93_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    v_min_i32_e32 v0, s6, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB93_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i32_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v0, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB93_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    v_min_i32_e32 v0, s6, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB93_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw min ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @flat_atomic_min_i32_ret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_min_i32_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB94_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    v_min_i32_e32 v0, s6, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB94_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i32_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB94_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    v_min_i32_e32 v0, s6, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB94_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i32_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB94_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    v_min_i32_e32 v0, s6, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB94_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw min ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
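+
+; The amdgpu_kernel variants below load their arguments from the kernarg
+; segment via s_load_dword* and end with s_endpgm rather than returning.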
+
+define amdgpu_kernel void @atomic_min_i32_addr64_offset(ptr %out, i32 %in, i32 %index) {
+; GCN1-LABEL: atomic_min_i32_addr64_offset:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_ashr_i32 s5, s3, 31
+; GCN1-NEXT:    s_mov_b32 s4, s3
+; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN1-NEXT:    s_add_u32 s0, s0, s4
+; GCN1-NEXT:    s_addc_u32 s1, s1, s5
+; GCN1-NEXT:    s_add_u32 s0, s0, 16
+; GCN1-NEXT:    s_addc_u32 s1, s1, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB95_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_min_i32_e32 v0, s2, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s0
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB95_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_min_i32_addr64_offset:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_ashr_i32 s5, s3, 31
+; GCN2-NEXT:    s_mov_b32 s4, s3
+; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN2-NEXT:    s_add_u32 s0, s0, s4
+; GCN2-NEXT:    s_addc_u32 s1, s1, s5
+; GCN2-NEXT:    s_add_u32 s0, s0, 16
+; GCN2-NEXT:    s_addc_u32 s1, s1, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB95_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_min_i32_e32 v0, s2, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s0
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB95_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_min_i32_addr64_offset:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_ashr_i32 s5, s3, 31
+; GCN3-NEXT:    s_mov_b32 s4, s3
+; GCN3-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN3-NEXT:    s_add_u32 s0, s0, s4
+; GCN3-NEXT:    s_addc_u32 s1, s1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB95_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_min_i32_e32 v0, s2, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s0
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB95_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr %out, i32 %index
+  %gep = getelementptr i32, ptr %ptr, i32 4
+  %tmp0 = atomicrmw min ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(ptr %out, ptr %out2, i32 %in, i32 %index) {
+; GCN1-LABEL: atomic_min_i32_ret_addr64_offset:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_ashr_i32 s7, s5, 31
+; GCN1-NEXT:    s_mov_b32 s6, s5
+; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; GCN1-NEXT:    s_add_u32 s0, s0, s6
+; GCN1-NEXT:    s_addc_u32 s1, s1, s7
+; GCN1-NEXT:    s_add_u32 s0, s0, 16
+; GCN1-NEXT:    s_addc_u32 s1, s1, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[6:7], 0
+; GCN1-NEXT:  .LBB96_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v3, s1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s0
+; GCN1-NEXT:    v_min_i32_e32 v0, s4, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    s_cbranch_execnz .LBB96_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v1, s2
+; GCN1-NEXT:    v_mov_b32_e32 v2, s3
+; GCN1-NEXT:    flat_store_dword v[1:2], v0
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_min_i32_ret_addr64_offset:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_ashr_i32 s7, s5, 31
+; GCN2-NEXT:    s_mov_b32 s6, s5
+; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; GCN2-NEXT:    s_add_u32 s0, s0, s6
+; GCN2-NEXT:    s_addc_u32 s1, s1, s7
+; GCN2-NEXT:    s_add_u32 s0, s0, 16
+; GCN2-NEXT:    s_addc_u32 s1, s1, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[6:7], 0
+; GCN2-NEXT:  .LBB96_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v3, s1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s0
+; GCN2-NEXT:    v_min_i32_e32 v0, s4, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    s_cbranch_execnz .LBB96_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v1, s2
+; GCN2-NEXT:    v_mov_b32_e32 v2, s3
+; GCN2-NEXT:    flat_store_dword v[1:2], v0
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_min_i32_ret_addr64_offset:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_ashr_i32 s1, s3, 31
+; GCN3-NEXT:    s_mov_b32 s0, s3
+; GCN3-NEXT:    s_lshl_b64 s[0:1], s[0:1], 2
+; GCN3-NEXT:    s_add_u32 s0, s4, s0
+; GCN3-NEXT:    s_addc_u32 s1, s5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB96_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v3, s1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s0
+; GCN3-NEXT:    v_min_i32_e32 v0, s2, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB96_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v1, s6
+; GCN3-NEXT:    v_mov_b32_e32 v2, s7
+; GCN3-NEXT:    flat_store_dword v[1:2], v0
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr %out, i32 %index
+  %gep = getelementptr i32, ptr %ptr, i32 4
+  %tmp0 = atomicrmw min ptr %gep, i32 %in seq_cst
+  store i32 %tmp0, ptr %out2
+  ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32(ptr %out, i32 %in) {
+; GCN1-LABEL: atomic_min_i32:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT:    s_mov_b64 s[0:1], 0
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s2
+; GCN1-NEXT:    v_mov_b32_e32 v1, s3
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:  .LBB97_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_min_i32_e32 v0, s4, v1
+; GCN1-NEXT:    v_mov_b32_e32 v3, s3
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; GCN1-NEXT:    s_cbranch_execnz .LBB97_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_min_i32:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT:    s_mov_b64 s[0:1], 0
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s2
+; GCN2-NEXT:    v_mov_b32_e32 v1, s3
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:  .LBB97_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_min_i32_e32 v0, s4, v1
+; GCN2-NEXT:    v_mov_b32_e32 v3, s3
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; GCN2-NEXT:    s_cbranch_execnz .LBB97_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_min_i32:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT:    s_mov_b64 s[0:1], 0
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s2
+; GCN3-NEXT:    v_mov_b32_e32 v1, s3
+; GCN3-NEXT:    flat_load_dword v1, v[0:1]
+; GCN3-NEXT:  .LBB97_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_min_i32_e32 v0, s4, v1
+; GCN3-NEXT:    v_mov_b32_e32 v3, s3
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT:    s_cbranch_execnz .LBB97_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_endpgm
+entry:
+  %tmp0 = atomicrmw min ptr %out, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_ret_addr64(ptr %out, ptr %out2, i32 %in, i32 %index) {
+; GCN1-LABEL: atomic_min_i32_ret_addr64:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_ashr_i32 s7, s5, 31
+; GCN1-NEXT:    s_mov_b32 s6, s5
+; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; GCN1-NEXT:    s_add_u32 s0, s0, s6
+; GCN1-NEXT:    s_addc_u32 s1, s1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[6:7], 0
+; GCN1-NEXT:  .LBB98_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v1, v0
+; GCN1-NEXT:    v_mov_b32_e32 v3, s1
+; GCN1-NEXT:    v_mov_b32_e32 v2, s0
+; GCN1-NEXT:    v_min_i32_e32 v0, s4, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    s_cbranch_execnz .LBB98_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v1, s2
+; GCN1-NEXT:    v_mov_b32_e32 v2, s3
+; GCN1-NEXT:    flat_store_dword v[1:2], v0
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_min_i32_ret_addr64:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_ashr_i32 s7, s5, 31
+; GCN2-NEXT:    s_mov_b32 s6, s5
+; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; GCN2-NEXT:    s_add_u32 s0, s0, s6
+; GCN2-NEXT:    s_addc_u32 s1, s1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[6:7], 0
+; GCN2-NEXT:  .LBB98_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v1, v0
+; GCN2-NEXT:    v_mov_b32_e32 v3, s1
+; GCN2-NEXT:    v_mov_b32_e32 v2, s0
+; GCN2-NEXT:    v_min_i32_e32 v0, s4, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    s_cbranch_execnz .LBB98_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v1, s2
+; GCN2-NEXT:    v_mov_b32_e32 v2, s3
+; GCN2-NEXT:    flat_store_dword v[1:2], v0
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_min_i32_ret_addr64:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_ashr_i32 s1, s3, 31
+; GCN3-NEXT:    s_mov_b32 s0, s3
+; GCN3-NEXT:    s_lshl_b64 s[0:1], s[0:1], 2
+; GCN3-NEXT:    s_add_u32 s0, s4, s0
+; GCN3-NEXT:    s_addc_u32 s1, s5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dword v0, v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB98_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v1, v0
+; GCN3-NEXT:    v_mov_b32_e32 v3, s1
+; GCN3-NEXT:    v_mov_b32_e32 v2, s0
+; GCN3-NEXT:    v_min_i32_e32 v0, s2, v1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB98_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v1, s6
+; GCN3-NEXT:    v_mov_b32_e32 v2, s7
+; GCN3-NEXT:    flat_store_dword v[1:2], v0
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr %out, i32 %index
+  %tmp0 = atomicrmw min ptr %ptr, i32 %in seq_cst
+  store i32 %tmp0, ptr %out2
+  ret void
+}
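+
+; Note: these signed min tests are expanded (by the AtomicExpand pass, which
+; creates the atomicrmw.start/atomicrmw.end blocks) into flat_atomic_cmpswap
+; retry loops; the wrap operations below instead select native flat atomics.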
+
+; ---------------------------------------------------------------------
+; atomicrmw uinc_wrap
+; ---------------------------------------------------------------------
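+; uinc_wrap stores (old u>= %in) ? 0 : old + 1 and yields the old value.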
+
+define void @flat_atomic_uinc_wrap_i32_noret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i32_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i32_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i32_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw uinc_wrap ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_uinc_wrap_i32_noret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i32_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i32_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i32_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw uinc_wrap ptr %gep, i32 %in seq_cst
+  ret void
+}
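+
+; gfx900 (GCN3) folds the 16-byte displacement into the instruction's
+; offset field; the older targets must first add it to the pointer with
+; v_add/v_addc.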
+
+define i32 @flat_atomic_uinc_wrap_i32_ret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i32_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i32_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i32_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw uinc_wrap ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
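+
+; The glc modifier on the atomic returns the pre-operation value in the data
+; register, which supplies %result for the _ret tests.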
+
+define i32 @flat_atomic_uinc_wrap_i32_ret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i32_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i32_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i32_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw uinc_wrap ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @flat_atomic_uinc_wrap_i32_noret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i32_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i32_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i32_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw uinc_wrap ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_uinc_wrap_i32_noret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i32_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i32_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i32_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw uinc_wrap ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @flat_atomic_uinc_wrap_i32_ret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i32_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i32_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i32_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw uinc_wrap ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @flat_atomic_uinc_wrap_i32_ret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i32_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i32_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i32_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw uinc_wrap ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw udec_wrap
+; ---------------------------------------------------------------------
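+; udec_wrap stores ((old == 0) || (old u> %in)) ? %in : old - 1 and yields
+; the old value.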
+
+define void @flat_atomic_udec_wrap_i32_noret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i32_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i32_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i32_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw udec_wrap ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_udec_wrap_i32_noret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i32_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i32_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i32_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw udec_wrap ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @flat_atomic_udec_wrap_i32_ret(ptr %ptr, i32 %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i32_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i32_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i32_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw udec_wrap ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @flat_atomic_udec_wrap_i32_ret_offset(ptr %out, i32 %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i32_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i32_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i32_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw udec_wrap ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i32_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i32_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i32_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec v[0:1], v2
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw udec_wrap ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i32_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec v[0:1], v2
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i32_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec v[0:1], v2
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i32_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec v[0:1], v2 offset:16
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %tmp0 = atomicrmw udec_wrap ptr %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_scalar(ptr inreg %ptr, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i32_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i32_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i32_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw udec_wrap ptr %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_offset_scalar(ptr inreg %out, i32 inreg %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i32_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 16
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s34
+; GCN1-NEXT:    v_mov_b32_e32 v1, s35
+; GCN1-NEXT:    v_mov_b32_e32 v2, s6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i32_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 16
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s34
+; GCN2-NEXT:    v_mov_b32_e32 v1, s35
+; GCN2-NEXT:    v_mov_b32_e32 v2, s6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i32_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    v_mov_b32_e32 v2, s6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec v0, v[0:1], v2 offset:16 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr %out, i32 4
+  %result = atomicrmw udec_wrap ptr %gep, i32 %in seq_cst
+  ret i32 %result
+}

diff  --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_min_max_system.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_min_max_system.ll
deleted file mode 100644
index 843800fd3881ce..00000000000000
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_min_max_system.ll
+++ /dev/null
@@ -1,2515 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN1 %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN2 %s
-
-define amdgpu_kernel void @atomic_max_i64_offset(ptr %out, i64 %in) {
-; GCN1-LABEL: atomic_max_i64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s0, s0, 32
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:  .LBB0_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB0_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s0, s0, 32
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:  .LBB0_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB0_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr %out, i64 4
-  %tmp0 = atomicrmw volatile max ptr %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
-; GCN1-LABEL: atomic_max_i64_ret_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s2, s4, 32
-; GCN1-NEXT:    s_addc_u32 s3, s5, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s2
-; GCN1-NEXT:    v_mov_b32_e32 v1, s3
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:  .LBB1_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s1
-; GCN1-NEXT:    v_mov_b32_e32 v6, s0
-; GCN1-NEXT:    v_mov_b32_e32 v5, s3
-; GCN1-NEXT:    v_mov_b32_e32 v4, s2
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB1_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    v_mov_b32_e32 v3, s7
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i64_ret_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s2, s4, 32
-; GCN2-NEXT:    s_addc_u32 s3, s5, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s2
-; GCN2-NEXT:    v_mov_b32_e32 v1, s3
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:  .LBB1_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s1
-; GCN2-NEXT:    v_mov_b32_e32 v6, s0
-; GCN2-NEXT:    v_mov_b32_e32 v5, s3
-; GCN2-NEXT:    v_mov_b32_e32 v4, s2
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB1_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    v_mov_b32_e32 v3, s7
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr %out, i64 4
-  %tmp0 = atomicrmw volatile max ptr %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_max_i64_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s4
-; GCN1-NEXT:    s_addc_u32 s1, s1, s5
-; GCN1-NEXT:    s_add_u32 s0, s0, 32
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:  .LBB2_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB2_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i64_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s4
-; GCN2-NEXT:    s_addc_u32 s1, s1, s5
-; GCN2-NEXT:    s_add_u32 s0, s0, 32
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:  .LBB2_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB2_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
-  %tmp0 = atomicrmw volatile max ptr %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_max_i64_ret_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s6
-; GCN1-NEXT:    s_addc_u32 s1, s1, s7
-; GCN1-NEXT:    s_add_u32 s0, s0, 32
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[6:7], 0
-; GCN1-NEXT:  .LBB3_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s5
-; GCN1-NEXT:    v_mov_b32_e32 v6, s4
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    s_cbranch_execnz .LBB3_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    v_mov_b32_e32 v3, s3
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i64_ret_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s6
-; GCN2-NEXT:    s_addc_u32 s1, s1, s7
-; GCN2-NEXT:    s_add_u32 s0, s0, 32
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[6:7], 0
-; GCN2-NEXT:  .LBB3_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s5
-; GCN2-NEXT:    v_mov_b32_e32 v6, s4
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    s_cbranch_execnz .LBB3_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    v_mov_b32_e32 v3, s3
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
-  %tmp0 = atomicrmw volatile max ptr %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64(ptr %out, i64 %in) {
-; GCN1-LABEL: atomic_max_i64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:  .LBB4_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB4_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:  .LBB4_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB4_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile max ptr %out, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64_ret(ptr %out, ptr %out2, i64 %in) {
-; GCN1-LABEL: atomic_max_i64_ret:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s4
-; GCN1-NEXT:    v_mov_b32_e32 v1, s5
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:  .LBB5_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s1
-; GCN1-NEXT:    v_mov_b32_e32 v6, s0
-; GCN1-NEXT:    v_mov_b32_e32 v4, s4
-; GCN1-NEXT:    v_mov_b32_e32 v5, s5
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB5_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    v_mov_b32_e32 v3, s7
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i64_ret:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s4
-; GCN2-NEXT:    v_mov_b32_e32 v1, s5
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:  .LBB5_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s1
-; GCN2-NEXT:    v_mov_b32_e32 v6, s0
-; GCN2-NEXT:    v_mov_b32_e32 v4, s4
-; GCN2-NEXT:    v_mov_b32_e32 v5, s5
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB5_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    v_mov_b32_e32 v3, s7
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile max ptr %out, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64_addr64(ptr %out, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_max_i64_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s4
-; GCN1-NEXT:    s_addc_u32 s1, s1, s5
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:  .LBB6_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB6_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i64_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s4
-; GCN2-NEXT:    s_addc_u32 s1, s1, s5
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:  .LBB6_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB6_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %tmp0 = atomicrmw volatile max ptr %ptr, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_max_i64_ret_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s6
-; GCN1-NEXT:    s_addc_u32 s1, s1, s7
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[6:7], 0
-; GCN1-NEXT:  .LBB7_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s5
-; GCN1-NEXT:    v_mov_b32_e32 v6, s4
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    s_cbranch_execnz .LBB7_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    v_mov_b32_e32 v3, s3
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i64_ret_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s6
-; GCN2-NEXT:    s_addc_u32 s1, s1, s7
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[6:7], 0
-; GCN2-NEXT:  .LBB7_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s5
-; GCN2-NEXT:    v_mov_b32_e32 v6, s4
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    s_cbranch_execnz .LBB7_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    v_mov_b32_e32 v3, s3
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %tmp0 = atomicrmw volatile max ptr %ptr, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64_offset(ptr %out, i64 %in) {
-; GCN1-LABEL: atomic_umax_i64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s0, s0, 32
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:  .LBB8_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB8_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s0, s0, 32
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:  .LBB8_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB8_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr %out, i64 4
-  %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
-; GCN1-LABEL: atomic_umax_i64_ret_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s2, s4, 32
-; GCN1-NEXT:    s_addc_u32 s3, s5, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s2
-; GCN1-NEXT:    v_mov_b32_e32 v1, s3
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:  .LBB9_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s1
-; GCN1-NEXT:    v_mov_b32_e32 v6, s0
-; GCN1-NEXT:    v_mov_b32_e32 v5, s3
-; GCN1-NEXT:    v_mov_b32_e32 v4, s2
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB9_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    v_mov_b32_e32 v3, s7
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i64_ret_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s2, s4, 32
-; GCN2-NEXT:    s_addc_u32 s3, s5, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s2
-; GCN2-NEXT:    v_mov_b32_e32 v1, s3
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:  .LBB9_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s1
-; GCN2-NEXT:    v_mov_b32_e32 v6, s0
-; GCN2-NEXT:    v_mov_b32_e32 v5, s3
-; GCN2-NEXT:    v_mov_b32_e32 v4, s2
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB9_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    v_mov_b32_e32 v3, s7
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr %out, i64 4
-  %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_umax_i64_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s4
-; GCN1-NEXT:    s_addc_u32 s1, s1, s5
-; GCN1-NEXT:    s_add_u32 s0, s0, 32
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:  .LBB10_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB10_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i64_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s4
-; GCN2-NEXT:    s_addc_u32 s1, s1, s5
-; GCN2-NEXT:    s_add_u32 s0, s0, 32
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:  .LBB10_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB10_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
-  %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_umax_i64_ret_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s6
-; GCN1-NEXT:    s_addc_u32 s1, s1, s7
-; GCN1-NEXT:    s_add_u32 s0, s0, 32
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[6:7], 0
-; GCN1-NEXT:  .LBB11_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s5
-; GCN1-NEXT:    v_mov_b32_e32 v6, s4
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    s_cbranch_execnz .LBB11_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    v_mov_b32_e32 v3, s3
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i64_ret_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s6
-; GCN2-NEXT:    s_addc_u32 s1, s1, s7
-; GCN2-NEXT:    s_add_u32 s0, s0, 32
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[6:7], 0
-; GCN2-NEXT:  .LBB11_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s5
-; GCN2-NEXT:    v_mov_b32_e32 v6, s4
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    s_cbranch_execnz .LBB11_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    v_mov_b32_e32 v3, s3
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
-  %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64(ptr %out, i64 %in) {
-; GCN1-LABEL: atomic_umax_i64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:  .LBB12_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB12_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:  .LBB12_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB12_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile umax ptr %out, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64_ret(ptr %out, ptr %out2, i64 %in) {
-; GCN1-LABEL: atomic_umax_i64_ret:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s4
-; GCN1-NEXT:    v_mov_b32_e32 v1, s5
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:  .LBB13_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s1
-; GCN1-NEXT:    v_mov_b32_e32 v6, s0
-; GCN1-NEXT:    v_mov_b32_e32 v4, s4
-; GCN1-NEXT:    v_mov_b32_e32 v5, s5
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB13_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    v_mov_b32_e32 v3, s7
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i64_ret:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s4
-; GCN2-NEXT:    v_mov_b32_e32 v1, s5
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:  .LBB13_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s1
-; GCN2-NEXT:    v_mov_b32_e32 v6, s0
-; GCN2-NEXT:    v_mov_b32_e32 v4, s4
-; GCN2-NEXT:    v_mov_b32_e32 v5, s5
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB13_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    v_mov_b32_e32 v3, s7
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile umax ptr %out, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64_addr64(ptr %out, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_umax_i64_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s4
-; GCN1-NEXT:    s_addc_u32 s1, s1, s5
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:  .LBB14_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB14_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i64_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s4
-; GCN2-NEXT:    s_addc_u32 s1, s1, s5
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:  .LBB14_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB14_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %tmp0 = atomicrmw volatile umax ptr %ptr, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_umax_i64_ret_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s6
-; GCN1-NEXT:    s_addc_u32 s1, s1, s7
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[6:7], 0
-; GCN1-NEXT:  .LBB15_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s5
-; GCN1-NEXT:    v_mov_b32_e32 v6, s4
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    s_cbranch_execnz .LBB15_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    v_mov_b32_e32 v3, s3
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i64_ret_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s6
-; GCN2-NEXT:    s_addc_u32 s1, s1, s7
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[6:7], 0
-; GCN2-NEXT:  .LBB15_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s5
-; GCN2-NEXT:    v_mov_b32_e32 v6, s4
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    s_cbranch_execnz .LBB15_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    v_mov_b32_e32 v3, s3
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %tmp0 = atomicrmw volatile umax ptr %ptr, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64_offset(ptr %out, i64 %in) {
-; GCN1-LABEL: atomic_min_i64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s0, s0, 32
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:  .LBB16_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB16_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s0, s0, 32
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:  .LBB16_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB16_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr %out, i64 4
-  %tmp0 = atomicrmw volatile min ptr %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
-; GCN1-LABEL: atomic_min_i64_ret_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s2, s4, 32
-; GCN1-NEXT:    s_addc_u32 s3, s5, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s2
-; GCN1-NEXT:    v_mov_b32_e32 v1, s3
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:  .LBB17_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s1
-; GCN1-NEXT:    v_mov_b32_e32 v6, s0
-; GCN1-NEXT:    v_mov_b32_e32 v5, s3
-; GCN1-NEXT:    v_mov_b32_e32 v4, s2
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB17_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    v_mov_b32_e32 v3, s7
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i64_ret_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s2, s4, 32
-; GCN2-NEXT:    s_addc_u32 s3, s5, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s2
-; GCN2-NEXT:    v_mov_b32_e32 v1, s3
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:  .LBB17_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s1
-; GCN2-NEXT:    v_mov_b32_e32 v6, s0
-; GCN2-NEXT:    v_mov_b32_e32 v5, s3
-; GCN2-NEXT:    v_mov_b32_e32 v4, s2
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB17_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    v_mov_b32_e32 v3, s7
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr %out, i64 4
-  %tmp0 = atomicrmw volatile min ptr %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_min_i64_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s4
-; GCN1-NEXT:    s_addc_u32 s1, s1, s5
-; GCN1-NEXT:    s_add_u32 s0, s0, 32
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:  .LBB18_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB18_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i64_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s4
-; GCN2-NEXT:    s_addc_u32 s1, s1, s5
-; GCN2-NEXT:    s_add_u32 s0, s0, 32
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:  .LBB18_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB18_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
-  %tmp0 = atomicrmw volatile min ptr %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_min_i64_ret_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s6
-; GCN1-NEXT:    s_addc_u32 s1, s1, s7
-; GCN1-NEXT:    s_add_u32 s0, s0, 32
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[6:7], 0
-; GCN1-NEXT:  .LBB19_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s5
-; GCN1-NEXT:    v_mov_b32_e32 v6, s4
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    s_cbranch_execnz .LBB19_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    v_mov_b32_e32 v3, s3
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i64_ret_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s6
-; GCN2-NEXT:    s_addc_u32 s1, s1, s7
-; GCN2-NEXT:    s_add_u32 s0, s0, 32
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[6:7], 0
-; GCN2-NEXT:  .LBB19_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s5
-; GCN2-NEXT:    v_mov_b32_e32 v6, s4
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    s_cbranch_execnz .LBB19_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    v_mov_b32_e32 v3, s3
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
-  %tmp0 = atomicrmw volatile min ptr %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64(ptr %out, i64 %in) {
-; GCN1-LABEL: atomic_min_i64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:  .LBB20_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB20_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:  .LBB20_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB20_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile min ptr %out, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64_ret(ptr %out, ptr %out2, i64 %in) {
-; GCN1-LABEL: atomic_min_i64_ret:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s4
-; GCN1-NEXT:    v_mov_b32_e32 v1, s5
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:  .LBB21_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s1
-; GCN1-NEXT:    v_mov_b32_e32 v6, s0
-; GCN1-NEXT:    v_mov_b32_e32 v4, s4
-; GCN1-NEXT:    v_mov_b32_e32 v5, s5
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB21_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    v_mov_b32_e32 v3, s7
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i64_ret:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s4
-; GCN2-NEXT:    v_mov_b32_e32 v1, s5
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:  .LBB21_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s1
-; GCN2-NEXT:    v_mov_b32_e32 v6, s0
-; GCN2-NEXT:    v_mov_b32_e32 v4, s4
-; GCN2-NEXT:    v_mov_b32_e32 v5, s5
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB21_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    v_mov_b32_e32 v3, s7
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile min ptr %out, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64_addr64(ptr %out, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_min_i64_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s4
-; GCN1-NEXT:    s_addc_u32 s1, s1, s5
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:  .LBB22_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB22_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i64_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s4
-; GCN2-NEXT:    s_addc_u32 s1, s1, s5
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:  .LBB22_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB22_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %tmp0 = atomicrmw volatile min ptr %ptr, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_min_i64_ret_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s6
-; GCN1-NEXT:    s_addc_u32 s1, s1, s7
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[6:7], 0
-; GCN1-NEXT:  .LBB23_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s5
-; GCN1-NEXT:    v_mov_b32_e32 v6, s4
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    s_cbranch_execnz .LBB23_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    v_mov_b32_e32 v3, s3
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i64_ret_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s6
-; GCN2-NEXT:    s_addc_u32 s1, s1, s7
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[6:7], 0
-; GCN2-NEXT:  .LBB23_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s5
-; GCN2-NEXT:    v_mov_b32_e32 v6, s4
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    s_cbranch_execnz .LBB23_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    v_mov_b32_e32 v3, s3
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %tmp0 = atomicrmw volatile min ptr %ptr, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_offset(ptr %out, i64 %in) {
-; GCN1-LABEL: atomic_umin_i64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s0, s0, 32
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:  .LBB24_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB24_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s0, s0, 32
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:  .LBB24_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB24_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr %out, i64 4
-  %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
-; GCN1-LABEL: atomic_umin_i64_ret_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s2, s4, 32
-; GCN1-NEXT:    s_addc_u32 s3, s5, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s2
-; GCN1-NEXT:    v_mov_b32_e32 v1, s3
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:  .LBB25_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_ge_u64_e32 vcc, s[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s1
-; GCN1-NEXT:    v_mov_b32_e32 v6, s0
-; GCN1-NEXT:    v_mov_b32_e32 v5, s3
-; GCN1-NEXT:    v_mov_b32_e32 v4, s2
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB25_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    v_mov_b32_e32 v3, s7
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i64_ret_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s2, s4, 32
-; GCN2-NEXT:    s_addc_u32 s3, s5, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s2
-; GCN2-NEXT:    v_mov_b32_e32 v1, s3
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:  .LBB25_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_ge_u64_e32 vcc, s[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s1
-; GCN2-NEXT:    v_mov_b32_e32 v6, s0
-; GCN2-NEXT:    v_mov_b32_e32 v5, s3
-; GCN2-NEXT:    v_mov_b32_e32 v4, s2
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB25_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    v_mov_b32_e32 v3, s7
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr %out, i64 4
-  %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_umin_i64_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s4
-; GCN1-NEXT:    s_addc_u32 s1, s1, s5
-; GCN1-NEXT:    s_add_u32 s0, s0, 32
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:  .LBB26_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB26_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i64_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s4
-; GCN2-NEXT:    s_addc_u32 s1, s1, s5
-; GCN2-NEXT:    s_add_u32 s0, s0, 32
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:  .LBB26_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB26_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
-  %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_umin_i64_ret_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s6
-; GCN1-NEXT:    s_addc_u32 s1, s1, s7
-; GCN1-NEXT:    s_add_u32 s0, s0, 32
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[6:7], 0
-; GCN1-NEXT:  .LBB27_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s5
-; GCN1-NEXT:    v_mov_b32_e32 v6, s4
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    s_cbranch_execnz .LBB27_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    v_mov_b32_e32 v3, s3
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i64_ret_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s6
-; GCN2-NEXT:    s_addc_u32 s1, s1, s7
-; GCN2-NEXT:    s_add_u32 s0, s0, 32
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[6:7], 0
-; GCN2-NEXT:  .LBB27_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s5
-; GCN2-NEXT:    v_mov_b32_e32 v6, s4
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    s_cbranch_execnz .LBB27_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    v_mov_b32_e32 v3, s3
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
-  %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64(ptr %out, i64 %in) {
-; GCN1-LABEL: atomic_umin_i64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:  .LBB28_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB28_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:  .LBB28_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB28_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile umin ptr %out, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_ret(ptr %out, ptr %out2, i64 %in) {
-; GCN1-LABEL: atomic_umin_i64_ret:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s4
-; GCN1-NEXT:    v_mov_b32_e32 v1, s5
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:  .LBB29_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_ge_u64_e32 vcc, s[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s1
-; GCN1-NEXT:    v_mov_b32_e32 v6, s0
-; GCN1-NEXT:    v_mov_b32_e32 v4, s4
-; GCN1-NEXT:    v_mov_b32_e32 v5, s5
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB29_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    v_mov_b32_e32 v3, s7
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i64_ret:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s4
-; GCN2-NEXT:    v_mov_b32_e32 v1, s5
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:  .LBB29_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_ge_u64_e32 vcc, s[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s1
-; GCN2-NEXT:    v_mov_b32_e32 v6, s0
-; GCN2-NEXT:    v_mov_b32_e32 v4, s4
-; GCN2-NEXT:    v_mov_b32_e32 v5, s5
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB29_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    v_mov_b32_e32 v3, s7
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile umin ptr %out, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_addr64(ptr %out, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_umin_i64_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s4
-; GCN1-NEXT:    s_addc_u32 s1, s1, s5
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[4:5], 0
-; GCN1-NEXT:  .LBB30_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s3
-; GCN1-NEXT:    v_mov_b32_e32 v6, s2
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT:    s_cbranch_execnz .LBB30_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i64_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s4
-; GCN2-NEXT:    s_addc_u32 s1, s1, s5
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[4:5], 0
-; GCN2-NEXT:  .LBB30_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s3
-; GCN2-NEXT:    v_mov_b32_e32 v6, s2
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT:    s_cbranch_execnz .LBB30_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %tmp0 = atomicrmw volatile umin ptr %ptr, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
-; GCN1-LABEL: atomic_umin_i64_ret_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN1-NEXT:    s_add_u32 s0, s0, s6
-; GCN1-NEXT:    s_addc_u32 s1, s1, s7
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[6:7], 0
-; GCN1-NEXT:  .LBB31_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v3, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, v0
-; GCN1-NEXT:    v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v0, s5
-; GCN1-NEXT:    v_mov_b32_e32 v6, s4
-; GCN1-NEXT:    v_mov_b32_e32 v5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v4, s0
-; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    s_cbranch_execnz .LBB31_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    v_mov_b32_e32 v3, s3
-; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i64_ret_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GCN2-NEXT:    s_add_u32 s0, s0, s6
-; GCN2-NEXT:    s_addc_u32 s1, s1, s7
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[6:7], 0
-; GCN2-NEXT:  .LBB31_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v3, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, v0
-; GCN2-NEXT:    v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v0, s5
-; GCN2-NEXT:    v_mov_b32_e32 v6, s4
-; GCN2-NEXT:    v_mov_b32_e32 v5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v4, s0
-; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    s_cbranch_execnz .LBB31_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    v_mov_b32_e32 v3, s3
-; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %tmp0 = atomicrmw volatile umin ptr %ptr, i64 %in seq_cst
-  store i64 %tmp0, ptr %out2
-  ret void
-}

diff  --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll
new file mode 100644
index 00000000000000..163065a71d0426
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll
@@ -0,0 +1,7798 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN1 %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN2 %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN3 %s
+
+; ---------------------------------------------------------------------
+; atomicrmw xchg
+; ---------------------------------------------------------------------
+
+define void @flat_atomic_xchg_i64_noret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_xchg_i64_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i64_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i64_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xchg ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_xchg_i64_noret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_xchg_i64_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i64_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i64_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw xchg ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @flat_atomic_xchg_i64_ret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_xchg_i64_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i64_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i64_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xchg ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @flat_atomic_xchg_i64_ret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_xchg_i64_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i64_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i64_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw xchg ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @flat_atomic_xchg_i64_noret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_xchg_i64_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i64_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i64_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap_x2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xchg ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_xchg_i64_noret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_xchg_i64_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i64_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i64_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap_x2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw xchg ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_xchg_i64_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i64_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i64_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xchg ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_xchg_i64_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xchg_i64_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xchg_i64_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw xchg ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw add
+; ---------------------------------------------------------------------
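+; Note: the checks in this group show a seq_cst 64-bit add lowering to a single
+; flat_atomic_add_x2. gfx900 (GCN3) folds the constant 32-byte offset into the
+; instruction's offset field; bonaire/tonga (GCN1/GCN2) have no flat offset
+; field, so the address is materialized first (v_add_i32/v_addc_u32 for VGPR
+; pointers, s_add_u32/s_addc_u32 for inreg pointers). The surrounding s_waitcnt
+; and buffer_wbinvl1_vol provide the seq_cst ordering at system scope.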
+
+define void @flat_atomic_add_i64_noret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_add_i64_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i64_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i64_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw add ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_add_i64_noret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_add_i64_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i64_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i64_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw add ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @flat_atomic_add_i64_ret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_add_i64_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i64_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i64_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw add ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @flat_atomic_add_i64_ret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_add_i64_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i64_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i64_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw add ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @flat_atomic_add_i64_noret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_add_i64_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i64_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i64_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add_x2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw add ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_add_i64_noret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_add_i64_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i64_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i64_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add_x2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw add ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @flat_atomic_add_i64_ret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_add_i64_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i64_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i64_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw add ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @flat_atomic_add_i64_ret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_add_i64_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_add_i64_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_add_i64_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw add ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw sub
+; ---------------------------------------------------------------------
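+; Note: same shape as the add tests, with flat_atomic_sub_x2. On the _ret
+; variants the glc bit is set so the instruction returns the pre-op value;
+; the _noret variants omit glc since the result is unused.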
+
+define void @flat_atomic_sub_i64_noret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_sub_i64_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i64_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i64_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw sub ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_sub_i64_noret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_sub_i64_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i64_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i64_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw sub ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @flat_atomic_sub_i64_ret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_sub_i64_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i64_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i64_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw sub ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @flat_atomic_sub_i64_ret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_sub_i64_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i64_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i64_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw sub ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @flat_atomic_sub_i64_noret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_sub_i64_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i64_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i64_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub_x2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw sub ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_sub_i64_noret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_sub_i64_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i64_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i64_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub_x2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw sub ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_sub_i64_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i64_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i64_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw sub ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_sub_i64_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_sub_i64_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_sub_i64_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw sub ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw and
+; ---------------------------------------------------------------------
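+; Note: and maps directly to flat_atomic_and_x2. In the amdgpu_gfx scalar
+; variants the inreg pointer and data arrive in SGPRs (s[4:5] and s[6:7]) and
+; are copied to VGPRs with v_mov_b32 first, since flat atomics only take
+; vector address and data operands.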
+
+define void @flat_atomic_and_i64_noret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_and_i64_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i64_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i64_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw and ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_and_i64_noret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_and_i64_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i64_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i64_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw and ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @flat_atomic_and_i64_ret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_and_i64_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i64_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i64_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw and ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @flat_atomic_and_i64_ret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_and_i64_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i64_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i64_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw and ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @flat_atomic_and_i64_noret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_and_i64_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i64_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i64_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and_x2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw and ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_and_i64_noret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_and_i64_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i64_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i64_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and_x2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw and ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @flat_atomic_and_i64_ret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_and_i64_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i64_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i64_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw and ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @flat_atomic_and_i64_ret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_and_i64_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_and_i64_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_and_i64_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw and ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw nand
+; ---------------------------------------------------------------------
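+; Note: there is no native flat nand instruction on any of these targets, so
+; AtomicExpand rewrites the RMW into a compare-exchange loop, roughly:
+;   %loaded = load i64, ptr %ptr
+; loop:
+;   %tmp = and i64 %loaded, %in
+;   %new = xor i64 %tmp, -1          ; not(and x, y) == nand
+;   ... = cmpxchg ptr %ptr, i64 %loaded, i64 %new seq_cst seq_cst
+; The .LBB*_1 atomicrmw.start blocks below check exactly that: v_and/v_not to
+; form the nand, flat_atomic_cmpswap_x2 to attempt the swap, and an exec-mask
+; loop (s_andn2_b64 exec, ... / s_cbranch_execnz) that retries until all lanes
+; succeed.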
+
+define void @flat_atomic_nand_i64_noret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_nand_i64_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v4, vcc, 4, v0
+; GCN1-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v6, v[0:1]
+; GCN1-NEXT:    flat_load_dword v7, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB32_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_and_b32_e32 v4, v7, v3
+; GCN1-NEXT:    v_and_b32_e32 v8, v6, v2
+; GCN1-NEXT:    v_not_b32_e32 v5, v4
+; GCN1-NEXT:    v_not_b32_e32 v4, v8
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v7, v5
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v6, v4
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB32_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i64_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v4, vcc, 4, v0
+; GCN2-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v6, v[0:1]
+; GCN2-NEXT:    flat_load_dword v7, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB32_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_and_b32_e32 v4, v7, v3
+; GCN2-NEXT:    v_and_b32_e32 v8, v6, v2
+; GCN2-NEXT:    v_not_b32_e32 v5, v4
+; GCN2-NEXT:    v_not_b32_e32 v4, v8
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v7, v5
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v6, v4
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB32_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i64_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB32_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_and_b32_e32 v4, v7, v3
+; GCN3-NEXT:    v_and_b32_e32 v8, v6, v2
+; GCN3-NEXT:    v_not_b32_e32 v5, v4
+; GCN3-NEXT:    v_not_b32_e32 v4, v8
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB32_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw nand ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_nand_i64_noret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_nand_i64_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v8, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v9, vcc, 0, v1, vcc
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 36, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v7, v[0:1]
+; GCN1-NEXT:    flat_load_dword v6, v[8:9]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB33_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_and_b32_e32 v0, v7, v3
+; GCN1-NEXT:    v_and_b32_e32 v1, v6, v2
+; GCN1-NEXT:    v_not_b32_e32 v5, v0
+; GCN1-NEXT:    v_not_b32_e32 v4, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v7, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v6, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB33_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i64_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v8, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v9, vcc, 0, v1, vcc
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 36, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v7, v[0:1]
+; GCN2-NEXT:    flat_load_dword v6, v[8:9]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB33_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_and_b32_e32 v0, v7, v3
+; GCN2-NEXT:    v_and_b32_e32 v1, v6, v2
+; GCN2-NEXT:    v_not_b32_e32 v5, v0
+; GCN2-NEXT:    v_not_b32_e32 v4, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v7, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v6, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB33_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i64_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[6:7], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB33_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_and_b32_e32 v4, v7, v3
+; GCN3-NEXT:    v_and_b32_e32 v8, v6, v2
+; GCN3-NEXT:    v_not_b32_e32 v5, v4
+; GCN3-NEXT:    v_not_b32_e32 v4, v8
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB33_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw nand ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @flat_atomic_nand_i64_ret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_nand_i64_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v5, vcc, 4, v0
+; GCN1-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    flat_load_dword v5, v[5:6]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB34_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v7, v5
+; GCN1-NEXT:    v_mov_b32_e32 v6, v4
+; GCN1-NEXT:    v_and_b32_e32 v4, v7, v3
+; GCN1-NEXT:    v_and_b32_e32 v8, v6, v2
+; GCN1-NEXT:    v_not_b32_e32 v5, v4
+; GCN1-NEXT:    v_not_b32_e32 v4, v8
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB34_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v0, v4
+; GCN1-NEXT:    v_mov_b32_e32 v1, v5
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i64_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v5, vcc, 4, v0
+; GCN2-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    flat_load_dword v5, v[5:6]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB34_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v7, v5
+; GCN2-NEXT:    v_mov_b32_e32 v6, v4
+; GCN2-NEXT:    v_and_b32_e32 v4, v7, v3
+; GCN2-NEXT:    v_and_b32_e32 v8, v6, v2
+; GCN2-NEXT:    v_not_b32_e32 v5, v4
+; GCN2-NEXT:    v_not_b32_e32 v4, v8
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB34_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v0, v4
+; GCN2-NEXT:    v_mov_b32_e32 v1, v5
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i64_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[4:5], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB34_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    v_and_b32_e32 v4, v7, v3
+; GCN3-NEXT:    v_and_b32_e32 v8, v6, v2
+; GCN3-NEXT:    v_not_b32_e32 v5, v4
+; GCN3-NEXT:    v_not_b32_e32 v4, v8
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB34_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v4
+; GCN3-NEXT:    v_mov_b32_e32 v1, v5
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw nand ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @flat_atomic_nand_i64_ret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_nand_i64_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v4, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 36, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    flat_load_dword v0, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB35_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v9, v1
+; GCN1-NEXT:    v_mov_b32_e32 v8, v0
+; GCN1-NEXT:    v_and_b32_e32 v0, v9, v3
+; GCN1-NEXT:    v_and_b32_e32 v1, v8, v2
+; GCN1-NEXT:    v_not_b32_e32 v7, v0
+; GCN1-NEXT:    v_not_b32_e32 v6, v1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB35_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i64_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v4, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 36, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    flat_load_dword v0, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB35_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v9, v1
+; GCN2-NEXT:    v_mov_b32_e32 v8, v0
+; GCN2-NEXT:    v_and_b32_e32 v0, v9, v3
+; GCN2-NEXT:    v_and_b32_e32 v1, v8, v2
+; GCN2-NEXT:    v_not_b32_e32 v7, v0
+; GCN2-NEXT:    v_not_b32_e32 v6, v1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB35_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i64_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[4:5], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB35_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    v_and_b32_e32 v4, v7, v3
+; GCN3-NEXT:    v_and_b32_e32 v8, v6, v2
+; GCN3-NEXT:    v_not_b32_e32 v5, v4
+; GCN3-NEXT:    v_not_b32_e32 v4, v8
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB35_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v4
+; GCN3-NEXT:    v_mov_b32_e32 v1, v5
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw nand ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @flat_atomic_nand_i64_noret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_nand_i64_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    s_add_u32 s34, s4, 4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v3, s34
+; GCN1-NEXT:    v_mov_b32_e32 v4, s35
+; GCN1-NEXT:    flat_load_dword v2, v[0:1]
+; GCN1-NEXT:    flat_load_dword v3, v[3:4]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB36_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_and_b32_e32 v0, s7, v3
+; GCN1-NEXT:    v_and_b32_e32 v6, s6, v2
+; GCN1-NEXT:    v_mov_b32_e32 v4, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s5
+; GCN1-NEXT:    v_not_b32_e32 v1, v0
+; GCN1-NEXT:    v_not_b32_e32 v0, v6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB36_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i64_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    s_add_u32 s34, s4, 4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v3, s34
+; GCN2-NEXT:    v_mov_b32_e32 v4, s35
+; GCN2-NEXT:    flat_load_dword v2, v[0:1]
+; GCN2-NEXT:    flat_load_dword v3, v[3:4]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB36_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_and_b32_e32 v0, s7, v3
+; GCN2-NEXT:    v_and_b32_e32 v6, s6, v2
+; GCN2-NEXT:    v_mov_b32_e32 v4, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s5
+; GCN2-NEXT:    v_not_b32_e32 v1, v0
+; GCN2-NEXT:    v_not_b32_e32 v0, v6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB36_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i64_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB36_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_and_b32_e32 v0, s7, v3
+; GCN3-NEXT:    v_and_b32_e32 v6, s6, v2
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_not_b32_e32 v1, v0
+; GCN3-NEXT:    v_not_b32_e32 v0, v6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB36_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw nand ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_nand_i64_noret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_nand_i64_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    s_add_u32 s36, s4, 36
+; GCN1-NEXT:    s_addc_u32 s37, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s36
+; GCN1-NEXT:    v_mov_b32_e32 v1, s37
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    flat_load_dword v3, v[0:1]
+; GCN1-NEXT:    flat_load_dword v2, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB37_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_and_b32_e32 v0, s7, v3
+; GCN1-NEXT:    v_and_b32_e32 v6, s6, v2
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    v_not_b32_e32 v1, v0
+; GCN1-NEXT:    v_not_b32_e32 v0, v6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB37_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i64_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    s_add_u32 s36, s4, 36
+; GCN2-NEXT:    s_addc_u32 s37, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s36
+; GCN2-NEXT:    v_mov_b32_e32 v1, s37
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    flat_load_dword v3, v[0:1]
+; GCN2-NEXT:    flat_load_dword v2, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB37_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_and_b32_e32 v0, s7, v3
+; GCN2-NEXT:    v_and_b32_e32 v6, s6, v2
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    v_not_b32_e32 v1, v0
+; GCN2-NEXT:    v_not_b32_e32 v0, v6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB37_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i64_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB37_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_and_b32_e32 v0, s7, v3
+; GCN3-NEXT:    v_and_b32_e32 v6, s6, v2
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_not_b32_e32 v1, v0
+; GCN3-NEXT:    v_not_b32_e32 v0, v6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB37_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw nand ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_nand_i64_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    s_add_u32 s34, s4, 4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    flat_load_dword v1, v[2:3]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB38_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_mov_b32_e32 v4, s4
+; GCN1-NEXT:    v_and_b32_e32 v0, s7, v3
+; GCN1-NEXT:    v_and_b32_e32 v6, s6, v2
+; GCN1-NEXT:    v_mov_b32_e32 v5, s5
+; GCN1-NEXT:    v_not_b32_e32 v1, v0
+; GCN1-NEXT:    v_not_b32_e32 v0, v6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB38_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i64_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    s_add_u32 s34, s4, 4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    flat_load_dword v1, v[2:3]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB38_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_mov_b32_e32 v4, s4
+; GCN2-NEXT:    v_and_b32_e32 v0, s7, v3
+; GCN2-NEXT:    v_and_b32_e32 v6, s6, v2
+; GCN2-NEXT:    v_mov_b32_e32 v5, s5
+; GCN2-NEXT:    v_not_b32_e32 v1, v0
+; GCN2-NEXT:    v_not_b32_e32 v0, v6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB38_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i64_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB38_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_and_b32_e32 v0, s7, v3
+; GCN3-NEXT:    v_and_b32_e32 v6, s6, v2
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_not_b32_e32 v1, v0
+; GCN3-NEXT:    v_not_b32_e32 v0, v6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB38_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw nand ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_nand_i64_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    s_add_u32 s36, s4, 36
+; GCN1-NEXT:    s_addc_u32 s37, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s36
+; GCN1-NEXT:    v_mov_b32_e32 v1, s37
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    flat_load_dword v0, v[2:3]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB39_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_and_b32_e32 v0, s7, v3
+; GCN1-NEXT:    v_and_b32_e32 v6, s6, v2
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    v_not_b32_e32 v1, v0
+; GCN1-NEXT:    v_not_b32_e32 v0, v6
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB39_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_nand_i64_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    s_add_u32 s36, s4, 36
+; GCN2-NEXT:    s_addc_u32 s37, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s36
+; GCN2-NEXT:    v_mov_b32_e32 v1, s37
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    flat_load_dword v0, v[2:3]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB39_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_and_b32_e32 v0, s7, v3
+; GCN2-NEXT:    v_and_b32_e32 v6, s6, v2
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    v_not_b32_e32 v1, v0
+; GCN2-NEXT:    v_not_b32_e32 v0, v6
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB39_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_nand_i64_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB39_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_and_b32_e32 v0, s7, v3
+; GCN3-NEXT:    v_and_b32_e32 v6, s6, v2
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_not_b32_e32 v1, v0
+; GCN3-NEXT:    v_not_b32_e32 v0, v6
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB39_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw nand ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw or
+; ---------------------------------------------------------------------
+
+define void @flat_atomic_or_i64_noret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_or_i64_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i64_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i64_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw or ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_or_i64_noret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_or_i64_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i64_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i64_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw or ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @flat_atomic_or_i64_ret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_or_i64_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i64_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i64_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw or ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @flat_atomic_or_i64_ret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_or_i64_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i64_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i64_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw or ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @flat_atomic_or_i64_noret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_or_i64_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i64_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i64_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw or ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_or_i64_noret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_or_i64_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i64_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i64_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw or ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @flat_atomic_or_i64_ret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_or_i64_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i64_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i64_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw or ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @flat_atomic_or_i64_ret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_or_i64_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_or_i64_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_or_i64_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw or ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw xor
+; ---------------------------------------------------------------------
+
+define void @flat_atomic_xor_i64_noret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_xor_i64_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i64_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i64_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xor ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_xor_i64_noret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_xor_i64_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i64_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i64_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw xor ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @flat_atomic_xor_i64_ret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_xor_i64_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i64_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i64_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xor ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @flat_atomic_xor_i64_ret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_xor_i64_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i64_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i64_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw xor ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @flat_atomic_xor_i64_noret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_xor_i64_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i64_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i64_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor_x2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xor ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_xor_i64_noret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_xor_i64_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i64_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i64_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor_x2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw xor ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_xor_i64_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i64_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i64_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xor ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_xor_i64_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_xor_i64_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_xor_i64_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw xor ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw max
+; ---------------------------------------------------------------------
+
+define void @flat_atomic_max_i64_noret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_max_i64_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v4, vcc, 4, v0
+; GCN1-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v6, v[0:1]
+; GCN1-NEXT:    flat_load_dword v7, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB56_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v7, v5
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v6, v4
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB56_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i64_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v4, vcc, 4, v0
+; GCN2-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v6, v[0:1]
+; GCN2-NEXT:    flat_load_dword v7, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB56_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v7, v5
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v6, v4
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB56_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i64_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB56_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB56_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw max ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_max_i64_noret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_max_i64_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v8, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v9, vcc, 0, v1, vcc
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 36, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v7, v[0:1]
+; GCN1-NEXT:    flat_load_dword v6, v[8:9]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB57_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v7, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v6, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB57_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i64_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v8, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v9, vcc, 0, v1, vcc
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 36, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v7, v[0:1]
+; GCN2-NEXT:    flat_load_dword v6, v[8:9]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB57_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v7, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v6, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB57_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i64_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[6:7], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB57_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB57_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw max ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @flat_atomic_max_i64_ret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_max_i64_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v5, vcc, 4, v0
+; GCN1-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    flat_load_dword v5, v[5:6]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB58_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v7, v5
+; GCN1-NEXT:    v_mov_b32_e32 v6, v4
+; GCN1-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB58_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v0, v4
+; GCN1-NEXT:    v_mov_b32_e32 v1, v5
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i64_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v5, vcc, 4, v0
+; GCN2-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    flat_load_dword v5, v[5:6]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB58_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v7, v5
+; GCN2-NEXT:    v_mov_b32_e32 v6, v4
+; GCN2-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB58_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v0, v4
+; GCN2-NEXT:    v_mov_b32_e32 v1, v5
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i64_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[4:5], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB58_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB58_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v4
+; GCN3-NEXT:    v_mov_b32_e32 v1, v5
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw max ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @flat_atomic_max_i64_ret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_max_i64_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v4, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 36, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    flat_load_dword v0, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB59_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v9, v1
+; GCN1-NEXT:    v_mov_b32_e32 v8, v0
+; GCN1-NEXT:    v_cmp_gt_i64_e32 vcc, v[8:9], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB59_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i64_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v4, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 36, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    flat_load_dword v0, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB59_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v9, v1
+; GCN2-NEXT:    v_mov_b32_e32 v8, v0
+; GCN2-NEXT:    v_cmp_gt_i64_e32 vcc, v[8:9], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB59_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i64_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[4:5], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB59_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB59_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v4
+; GCN3-NEXT:    v_mov_b32_e32 v1, v5
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw max ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @flat_atomic_max_i64_noret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_max_i64_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    s_add_u32 s34, s4, 4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v3, s34
+; GCN1-NEXT:    v_mov_b32_e32 v4, s35
+; GCN1-NEXT:    flat_load_dword v2, v[0:1]
+; GCN1-NEXT:    flat_load_dword v3, v[3:4]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB60_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s5
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB60_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i64_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    s_add_u32 s34, s4, 4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v3, s34
+; GCN2-NEXT:    v_mov_b32_e32 v4, s35
+; GCN2-NEXT:    flat_load_dword v2, v[0:1]
+; GCN2-NEXT:    flat_load_dword v3, v[3:4]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB60_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s5
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB60_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i64_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB60_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB60_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw max ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_max_i64_noret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_max_i64_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    s_add_u32 s36, s4, 36
+; GCN1-NEXT:    s_addc_u32 s37, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s36
+; GCN1-NEXT:    v_mov_b32_e32 v1, s37
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    flat_load_dword v3, v[0:1]
+; GCN1-NEXT:    flat_load_dword v2, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB61_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB61_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i64_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    s_add_u32 s36, s4, 36
+; GCN2-NEXT:    s_addc_u32 s37, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s36
+; GCN2-NEXT:    v_mov_b32_e32 v1, s37
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    flat_load_dword v3, v[0:1]
+; GCN2-NEXT:    flat_load_dword v2, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB61_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB61_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i64_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB61_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB61_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw max ptr %gep, i64 %in seq_cst
+  ret void
+}
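+
+; NOTE: in the _scalar variants (amdgpu_gfx functions taking the pointer and
+; value inreg), GCN1 and GCN2 load the 64-bit value as two flat_load_dword
+; accesses at the base address and base+4, while GCN3 uses a single
+; flat_load_dwordx2 and folds the 32-byte displacement of the offset tests
+; into the instruction's offset field instead of computing it with
+; s_add_u32/s_addc_u32.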
+
+define amdgpu_gfx i64 @flat_atomic_max_i64_ret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_max_i64_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    s_add_u32 s34, s4, 4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    flat_load_dword v1, v[2:3]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB62_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s5
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB62_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i64_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    s_add_u32 s34, s4, 4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    flat_load_dword v1, v[2:3]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB62_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s5
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB62_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i64_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB62_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB62_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw max ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @flat_atomic_max_i64_ret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_max_i64_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    s_add_u32 s36, s4, 36
+; GCN1-NEXT:    s_addc_u32 s37, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s36
+; GCN1-NEXT:    v_mov_b32_e32 v1, s37
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    flat_load_dword v0, v[2:3]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB63_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB63_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_max_i64_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    s_add_u32 s36, s4, 36
+; GCN2-NEXT:    s_addc_u32 s37, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s36
+; GCN2-NEXT:    v_mov_b32_e32 v1, s37
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    flat_load_dword v0, v[2:3]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB63_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB63_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_max_i64_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB63_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB63_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw max ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_kernel void @atomic_max_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i64_addr64_offset:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT:    s_add_u32 s0, s0, s4
+; GCN1-NEXT:    s_addc_u32 s1, s1, s5
+; GCN1-NEXT:    s_add_u32 s0, s0, 32
+; GCN1-NEXT:    s_addc_u32 s1, s1, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB64_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s3
+; GCN1-NEXT:    v_mov_b32_e32 v6, s2
+; GCN1-NEXT:    v_mov_b32_e32 v5, s1
+; GCN1-NEXT:    v_mov_b32_e32 v4, s0
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB64_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_addr64_offset:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT:    s_add_u32 s0, s0, s4
+; GCN2-NEXT:    s_addc_u32 s1, s1, s5
+; GCN2-NEXT:    s_add_u32 s0, s0, 32
+; GCN2-NEXT:    s_addc_u32 s1, s1, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB64_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s3
+; GCN2-NEXT:    v_mov_b32_e32 v6, s2
+; GCN2-NEXT:    v_mov_b32_e32 v5, s1
+; GCN2-NEXT:    v_mov_b32_e32 v4, s0
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB64_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_max_i64_addr64_offset:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
+; GCN3-NEXT:    s_add_u32 s0, s4, s0
+; GCN3-NEXT:    s_addc_u32 s1, s5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[2:3], 0
+; GCN3-NEXT:  .LBB64_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v4, s0
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT:    s_cbranch_execnz .LBB64_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr %out, i64 %index
+  %gep = getelementptr i64, ptr %ptr, i64 4
+  %tmp0 = atomicrmw max ptr %gep, i64 %in seq_cst
+  ret void
+}
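+
+; NOTE: in the addr64 variants the s_lshl_b64 by 3 scales the i64 %index
+; into a byte offset (8 bytes per element), and the additional 32 bytes come
+; from the `getelementptr i64, ptr %ptr, i64 4`. GCN1 and GCN2 add the 32
+; with s_add_u32/s_addc_u32; GCN3 keeps it as offset:32 on the flat
+; instructions.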
+
+define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i64_ret_addr64_offset:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT:    s_add_u32 s0, s0, s6
+; GCN1-NEXT:    s_addc_u32 s1, s1, s7
+; GCN1-NEXT:    s_add_u32 s0, s0, 32
+; GCN1-NEXT:    s_addc_u32 s1, s1, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[6:7], 0
+; GCN1-NEXT:  .LBB65_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s5
+; GCN1-NEXT:    v_mov_b32_e32 v6, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s1
+; GCN1-NEXT:    v_mov_b32_e32 v4, s0
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    s_cbranch_execnz .LBB65_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v2, s2
+; GCN1-NEXT:    v_mov_b32_e32 v3, s3
+; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_ret_addr64_offset:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT:    s_add_u32 s0, s0, s6
+; GCN2-NEXT:    s_addc_u32 s1, s1, s7
+; GCN2-NEXT:    s_add_u32 s0, s0, 32
+; GCN2-NEXT:    s_addc_u32 s1, s1, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[6:7], 0
+; GCN2-NEXT:  .LBB65_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s5
+; GCN2-NEXT:    v_mov_b32_e32 v6, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s1
+; GCN2-NEXT:    v_mov_b32_e32 v4, s0
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    s_cbranch_execnz .LBB65_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v2, s2
+; GCN2-NEXT:    v_mov_b32_e32 v3, s3
+; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_max_i64_ret_addr64_offset:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN3-NEXT:    s_add_u32 s0, s0, s6
+; GCN3-NEXT:    s_addc_u32 s1, s1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[6:7], 0
+; GCN3-NEXT:  .LBB65_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s5
+; GCN3-NEXT:    v_mov_b32_e32 v6, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v4, s0
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN3-NEXT:    s_cbranch_execnz .LBB65_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v2, s2
+; GCN3-NEXT:    v_mov_b32_e32 v3, s3
+; GCN3-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr %out, i64 %index
+  %gep = getelementptr i64, ptr %ptr, i64 4
+  %tmp0 = atomicrmw max ptr %gep, i64 %in seq_cst
+  store i64 %tmp0, ptr %out2
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_addr64(ptr %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i64_addr64:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT:    s_add_u32 s0, s0, s4
+; GCN1-NEXT:    s_addc_u32 s1, s1, s5
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB66_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s3
+; GCN1-NEXT:    v_mov_b32_e32 v6, s2
+; GCN1-NEXT:    v_mov_b32_e32 v5, s1
+; GCN1-NEXT:    v_mov_b32_e32 v4, s0
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB66_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_addr64:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT:    s_add_u32 s0, s0, s4
+; GCN2-NEXT:    s_addc_u32 s1, s1, s5
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB66_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s3
+; GCN2-NEXT:    v_mov_b32_e32 v6, s2
+; GCN2-NEXT:    v_mov_b32_e32 v5, s1
+; GCN2-NEXT:    v_mov_b32_e32 v4, s0
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB66_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_max_i64_addr64:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
+; GCN3-NEXT:    s_add_u32 s0, s4, s0
+; GCN3-NEXT:    s_addc_u32 s1, s5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[2:3], 0
+; GCN3-NEXT:  .LBB66_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v4, s0
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT:    s_cbranch_execnz .LBB66_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr %out, i64 %index
+  %tmp0 = atomicrmw max ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i64_ret_addr64:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT:    s_add_u32 s0, s0, s6
+; GCN1-NEXT:    s_addc_u32 s1, s1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[6:7], 0
+; GCN1-NEXT:  .LBB67_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s5
+; GCN1-NEXT:    v_mov_b32_e32 v6, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s1
+; GCN1-NEXT:    v_mov_b32_e32 v4, s0
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    s_cbranch_execnz .LBB67_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v2, s2
+; GCN1-NEXT:    v_mov_b32_e32 v3, s3
+; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_ret_addr64:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT:    s_add_u32 s0, s0, s6
+; GCN2-NEXT:    s_addc_u32 s1, s1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[6:7], 0
+; GCN2-NEXT:  .LBB67_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s5
+; GCN2-NEXT:    v_mov_b32_e32 v6, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s1
+; GCN2-NEXT:    v_mov_b32_e32 v4, s0
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    s_cbranch_execnz .LBB67_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v2, s2
+; GCN2-NEXT:    v_mov_b32_e32 v3, s3
+; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_max_i64_ret_addr64:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN3-NEXT:    s_add_u32 s0, s0, s6
+; GCN3-NEXT:    s_addc_u32 s1, s1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[6:7], 0
+; GCN3-NEXT:  .LBB67_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s5
+; GCN3-NEXT:    v_mov_b32_e32 v6, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v4, s0
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN3-NEXT:    s_cbranch_execnz .LBB67_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v2, s2
+; GCN3-NEXT:    v_mov_b32_e32 v3, s3
+; GCN3-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr %out, i64 %index
+  %tmp0 = atomicrmw max ptr %ptr, i64 %in seq_cst
+  store i64 %tmp0, ptr %out2
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw umax
+; ---------------------------------------------------------------------
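+; NOTE: the umax tests below follow the same compare-and-swap loop as the
+; signed max tests above; the only difference in the generated code is the
+; unsigned 64-bit comparison (v_cmp_gt_u64/v_cmp_lt_u64 in place of the _i64
+; forms), corresponding to `icmp ugt` in the expanded IR sketch shown
+; earlier.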
+
+define void @flat_atomic_umax_i64_noret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_umax_i64_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v4, vcc, 4, v0
+; GCN1-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v6, v[0:1]
+; GCN1-NEXT:    flat_load_dword v7, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB68_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v7, v5
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v6, v4
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB68_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i64_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v4, vcc, 4, v0
+; GCN2-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v6, v[0:1]
+; GCN2-NEXT:    flat_load_dword v7, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB68_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v7, v5
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v6, v4
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB68_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i64_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB68_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB68_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umax ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_umax_i64_noret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_umax_i64_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v8, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v9, vcc, 0, v1, vcc
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 36, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v7, v[0:1]
+; GCN1-NEXT:    flat_load_dword v6, v[8:9]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB69_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v7, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v6, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB69_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i64_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v8, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v9, vcc, 0, v1, vcc
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 36, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v7, v[0:1]
+; GCN2-NEXT:    flat_load_dword v6, v[8:9]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB69_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v7, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v6, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB69_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i64_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[6:7], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB69_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB69_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw umax ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @flat_atomic_umax_i64_ret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_umax_i64_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v5, vcc, 4, v0
+; GCN1-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    flat_load_dword v5, v[5:6]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB70_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v7, v5
+; GCN1-NEXT:    v_mov_b32_e32 v6, v4
+; GCN1-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB70_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v0, v4
+; GCN1-NEXT:    v_mov_b32_e32 v1, v5
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i64_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v5, vcc, 4, v0
+; GCN2-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    flat_load_dword v5, v[5:6]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB70_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v7, v5
+; GCN2-NEXT:    v_mov_b32_e32 v6, v4
+; GCN2-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB70_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v0, v4
+; GCN2-NEXT:    v_mov_b32_e32 v1, v5
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i64_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[4:5], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB70_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB70_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v4
+; GCN3-NEXT:    v_mov_b32_e32 v1, v5
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umax ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @flat_atomic_umax_i64_ret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_umax_i64_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v4, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 36, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    flat_load_dword v0, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB71_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v9, v1
+; GCN1-NEXT:    v_mov_b32_e32 v8, v0
+; GCN1-NEXT:    v_cmp_gt_u64_e32 vcc, v[8:9], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB71_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i64_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v4, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 36, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    flat_load_dword v0, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB71_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v9, v1
+; GCN2-NEXT:    v_mov_b32_e32 v8, v0
+; GCN2-NEXT:    v_cmp_gt_u64_e32 vcc, v[8:9], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB71_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i64_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[4:5], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB71_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB71_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v4
+; GCN3-NEXT:    v_mov_b32_e32 v1, v5
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw umax ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @flat_atomic_umax_i64_noret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_umax_i64_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    s_add_u32 s34, s4, 4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v3, s34
+; GCN1-NEXT:    v_mov_b32_e32 v4, s35
+; GCN1-NEXT:    flat_load_dword v2, v[0:1]
+; GCN1-NEXT:    flat_load_dword v3, v[3:4]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB72_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s5
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB72_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i64_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    s_add_u32 s34, s4, 4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v3, s34
+; GCN2-NEXT:    v_mov_b32_e32 v4, s35
+; GCN2-NEXT:    flat_load_dword v2, v[0:1]
+; GCN2-NEXT:    flat_load_dword v3, v[3:4]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB72_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s5
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB72_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i64_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB72_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB72_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umax ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_umax_i64_noret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_umax_i64_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    s_add_u32 s36, s4, 36
+; GCN1-NEXT:    s_addc_u32 s37, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s36
+; GCN1-NEXT:    v_mov_b32_e32 v1, s37
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    flat_load_dword v3, v[0:1]
+; GCN1-NEXT:    flat_load_dword v2, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB73_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB73_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i64_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    s_add_u32 s36, s4, 36
+; GCN2-NEXT:    s_addc_u32 s37, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s36
+; GCN2-NEXT:    v_mov_b32_e32 v1, s37
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    flat_load_dword v3, v[0:1]
+; GCN2-NEXT:    flat_load_dword v2, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB73_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB73_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i64_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB73_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB73_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw umax ptr %gep, i64 %in seq_cst
+  ret void
+}
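+; Note on the _offset variants: the IR addresses %out + 32 bytes. GCN1 and
+; GCN2 materialize that offset with s_add_u32/s_addc_u32 (or v_add/v_addc)
+; pairs and fetch the two halves with separate flat_load_dword instructions,
+; because flat instructions only gained an immediate offset field on gfx9;
+; GCN3 folds it into the instruction as offset:32 and uses a single
+; flat_load_dwordx2.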
+
+define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_umax_i64_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    s_add_u32 s34, s4, 4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    flat_load_dword v1, v[2:3]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB74_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s5
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB74_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i64_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    s_add_u32 s34, s4, 4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    flat_load_dword v1, v[2:3]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB74_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s5
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB74_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i64_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB74_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB74_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umax ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_umax_i64_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    s_add_u32 s36, s4, 36
+; GCN1-NEXT:    s_addc_u32 s37, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s36
+; GCN1-NEXT:    v_mov_b32_e32 v1, s37
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    flat_load_dword v0, v[2:3]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB75_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB75_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umax_i64_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    s_add_u32 s36, s4, 36
+; GCN2-NEXT:    s_addc_u32 s37, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s36
+; GCN2-NEXT:    v_mov_b32_e32 v1, s37
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    flat_load_dword v0, v[2:3]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB75_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB75_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umax_i64_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB75_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB75_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw umax ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_kernel void @atomic_umax_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umax_i64_addr64_offset:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT:    s_add_u32 s0, s0, s4
+; GCN1-NEXT:    s_addc_u32 s1, s1, s5
+; GCN1-NEXT:    s_add_u32 s0, s0, 32
+; GCN1-NEXT:    s_addc_u32 s1, s1, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB76_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s3
+; GCN1-NEXT:    v_mov_b32_e32 v6, s2
+; GCN1-NEXT:    v_mov_b32_e32 v5, s1
+; GCN1-NEXT:    v_mov_b32_e32 v4, s0
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB76_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i64_addr64_offset:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT:    s_add_u32 s0, s0, s4
+; GCN2-NEXT:    s_addc_u32 s1, s1, s5
+; GCN2-NEXT:    s_add_u32 s0, s0, 32
+; GCN2-NEXT:    s_addc_u32 s1, s1, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB76_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s3
+; GCN2-NEXT:    v_mov_b32_e32 v6, s2
+; GCN2-NEXT:    v_mov_b32_e32 v5, s1
+; GCN2-NEXT:    v_mov_b32_e32 v4, s0
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB76_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i64_addr64_offset:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
+; GCN3-NEXT:    s_add_u32 s0, s4, s0
+; GCN3-NEXT:    s_addc_u32 s1, s5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[2:3], 0
+; GCN3-NEXT:  .LBB76_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v4, s0
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT:    s_cbranch_execnz .LBB76_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr %out, i64 %index
+  %gep = getelementptr i64, ptr %ptr, i64 4
+  %tmp0 = atomicrmw umax ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umax_i64_ret_addr64_offset:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT:    s_add_u32 s0, s0, s6
+; GCN1-NEXT:    s_addc_u32 s1, s1, s7
+; GCN1-NEXT:    s_add_u32 s0, s0, 32
+; GCN1-NEXT:    s_addc_u32 s1, s1, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[6:7], 0
+; GCN1-NEXT:  .LBB77_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s5
+; GCN1-NEXT:    v_mov_b32_e32 v6, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s1
+; GCN1-NEXT:    v_mov_b32_e32 v4, s0
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    s_cbranch_execnz .LBB77_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v2, s2
+; GCN1-NEXT:    v_mov_b32_e32 v3, s3
+; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i64_ret_addr64_offset:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT:    s_add_u32 s0, s0, s6
+; GCN2-NEXT:    s_addc_u32 s1, s1, s7
+; GCN2-NEXT:    s_add_u32 s0, s0, 32
+; GCN2-NEXT:    s_addc_u32 s1, s1, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[6:7], 0
+; GCN2-NEXT:  .LBB77_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s5
+; GCN2-NEXT:    v_mov_b32_e32 v6, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s1
+; GCN2-NEXT:    v_mov_b32_e32 v4, s0
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    s_cbranch_execnz .LBB77_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v2, s2
+; GCN2-NEXT:    v_mov_b32_e32 v3, s3
+; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i64_ret_addr64_offset:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN3-NEXT:    s_add_u32 s0, s0, s6
+; GCN3-NEXT:    s_addc_u32 s1, s1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[6:7], 0
+; GCN3-NEXT:  .LBB77_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s5
+; GCN3-NEXT:    v_mov_b32_e32 v6, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v4, s0
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN3-NEXT:    s_cbranch_execnz .LBB77_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v2, s2
+; GCN3-NEXT:    v_mov_b32_e32 v3, s3
+; GCN3-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr %out, i64 %index
+  %gep = getelementptr i64, ptr %ptr, i64 4
+  %tmp0 = atomicrmw umax ptr %gep, i64 %in seq_cst
+  store i64 %tmp0, ptr %out2
+  ret void
+}
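+; The _ret kernels store the value produced by the atomicrmw, i.e. the value
+; memory held before the operation: when the loop exits, v[0:1] still carry
+; the result of the last flat_atomic_cmpswap_x2, and that is what
+; flat_store_dwordx2 writes to %out2.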
+
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umax_i64_ret_addr64:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT:    s_add_u32 s0, s0, s6
+; GCN1-NEXT:    s_addc_u32 s1, s1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[6:7], 0
+; GCN1-NEXT:  .LBB78_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s5
+; GCN1-NEXT:    v_mov_b32_e32 v6, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s1
+; GCN1-NEXT:    v_mov_b32_e32 v4, s0
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    s_cbranch_execnz .LBB78_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v2, s2
+; GCN1-NEXT:    v_mov_b32_e32 v3, s3
+; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i64_ret_addr64:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT:    s_add_u32 s0, s0, s6
+; GCN2-NEXT:    s_addc_u32 s1, s1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[6:7], 0
+; GCN2-NEXT:  .LBB78_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s5
+; GCN2-NEXT:    v_mov_b32_e32 v6, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s1
+; GCN2-NEXT:    v_mov_b32_e32 v4, s0
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    s_cbranch_execnz .LBB78_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v2, s2
+; GCN2-NEXT:    v_mov_b32_e32 v3, s3
+; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i64_ret_addr64:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN3-NEXT:    s_add_u32 s0, s0, s6
+; GCN3-NEXT:    s_addc_u32 s1, s1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[6:7], 0
+; GCN3-NEXT:  .LBB78_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s5
+; GCN3-NEXT:    v_mov_b32_e32 v6, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v4, s0
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN3-NEXT:    s_cbranch_execnz .LBB78_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v2, s2
+; GCN3-NEXT:    v_mov_b32_e32 v3, s3
+; GCN3-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr %out, i64 %index
+  %tmp0 = atomicrmw umax ptr %ptr, i64 %in seq_cst
+  store i64 %tmp0, ptr %out2
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw umin
+; ---------------------------------------------------------------------
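+; All of the umin tests below expand the seq_cst atomicrmw into a
+; compare-and-swap loop: load the current value, select the smaller of it and
+; %in, and retry flat_atomic_cmpswap_x2 until the swap observes the same
+; value the update was computed from. A minimal IR-level sketch of that loop
+; (a hypothetical function for illustration, not part of this test):
+;
+;   define i64 @umin_cas_sketch(ptr %p, i64 %in) {
+;   entry:
+;     %init = load i64, ptr %p
+;     br label %loop
+;   loop:
+;     %old = phi i64 [ %init, %entry ], [ %loaded, %loop ]
+;     %le = icmp ule i64 %old, %in              ; v_cmp_le_u64 in the checks
+;     %new = select i1 %le, i64 %old, i64 %in   ; the v_cndmask pair
+;     %pair = cmpxchg ptr %p, i64 %old, i64 %new seq_cst seq_cst
+;     %loaded = extractvalue { i64, i1 } %pair, 0
+;     %ok = extractvalue { i64, i1 } %pair, 1   ; v_cmp_eq_u64 in the checks
+;     br i1 %ok, label %end, label %loop
+;   end:
+;     ret i64 %loaded                           ; the pre-op value
+;   }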
+
+define void @flat_atomic_umin_i64_noret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_umin_i64_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v4, vcc, 4, v0
+; GCN1-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v6, v[0:1]
+; GCN1-NEXT:    flat_load_dword v7, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB79_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v7, v5
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v6, v4
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB79_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i64_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v4, vcc, 4, v0
+; GCN2-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v6, v[0:1]
+; GCN2-NEXT:    flat_load_dword v7, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB79_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v7, v5
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v6, v4
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB79_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i64_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB79_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB79_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umin ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_umin_i64_noret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_umin_i64_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v8, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v9, vcc, 0, v1, vcc
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 36, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v7, v[0:1]
+; GCN1-NEXT:    flat_load_dword v6, v[8:9]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB80_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v7, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v6, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB80_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i64_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v8, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v9, vcc, 0, v1, vcc
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 36, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v7, v[0:1]
+; GCN2-NEXT:    flat_load_dword v6, v[8:9]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB80_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v7, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v6, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB80_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i64_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[6:7], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB80_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB80_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw umin ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @flat_atomic_umin_i64_ret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_umin_i64_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v5, vcc, 4, v0
+; GCN1-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    flat_load_dword v5, v[5:6]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB81_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v7, v5
+; GCN1-NEXT:    v_mov_b32_e32 v6, v4
+; GCN1-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB81_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v0, v4
+; GCN1-NEXT:    v_mov_b32_e32 v1, v5
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i64_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v5, vcc, 4, v0
+; GCN2-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    flat_load_dword v5, v[5:6]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB81_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v7, v5
+; GCN2-NEXT:    v_mov_b32_e32 v6, v4
+; GCN2-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB81_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v0, v4
+; GCN2-NEXT:    v_mov_b32_e32 v1, v5
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i64_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[4:5], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB81_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB81_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v4
+; GCN3-NEXT:    v_mov_b32_e32 v1, v5
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umin ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
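+; In the _ret variants the phi copies (the v_mov of the cmpswap result back
+; into the "old value" registers) sit at the top of the loop rather than the
+; bottom, so the returned pre-op value is still live in the result registers
+; when the loop exits.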
+
+define i64 @flat_atomic_umin_i64_ret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_umin_i64_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v4, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 36, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    flat_load_dword v0, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB82_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v9, v1
+; GCN1-NEXT:    v_mov_b32_e32 v8, v0
+; GCN1-NEXT:    v_cmp_le_u64_e32 vcc, v[8:9], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB82_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i64_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v4, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 36, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    flat_load_dword v0, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB82_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v9, v1
+; GCN2-NEXT:    v_mov_b32_e32 v8, v0
+; GCN2-NEXT:    v_cmp_le_u64_e32 vcc, v[8:9], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB82_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i64_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[4:5], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB82_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB82_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v4
+; GCN3-NEXT:    v_mov_b32_e32 v1, v5
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw umin ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
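+; The *_scalar variants below are amdgpu_gfx functions taking their operands
+; inreg, so the pointer and %in arrive in SGPRs (s[4:5] and s[6:7]); the
+; compare reads s[6:7] directly, and the loop re-materializes the address
+; into v[4:5] from the SGPRs on every iteration.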
+define amdgpu_gfx void @flat_atomic_umin_i64_noret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_umin_i64_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    s_add_u32 s34, s4, 4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v3, s34
+; GCN1-NEXT:    v_mov_b32_e32 v4, s35
+; GCN1-NEXT:    flat_load_dword v2, v[0:1]
+; GCN1-NEXT:    flat_load_dword v3, v[3:4]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB83_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s5
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB83_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i64_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    s_add_u32 s34, s4, 4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v3, s34
+; GCN2-NEXT:    v_mov_b32_e32 v4, s35
+; GCN2-NEXT:    flat_load_dword v2, v[0:1]
+; GCN2-NEXT:    flat_load_dword v3, v[3:4]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB83_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s5
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB83_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i64_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB83_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB83_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umin ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_umin_i64_noret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_umin_i64_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    s_add_u32 s36, s4, 36
+; GCN1-NEXT:    s_addc_u32 s37, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s36
+; GCN1-NEXT:    v_mov_b32_e32 v1, s37
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    flat_load_dword v3, v[0:1]
+; GCN1-NEXT:    flat_load_dword v2, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB84_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB84_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i64_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    s_add_u32 s36, s4, 36
+; GCN2-NEXT:    s_addc_u32 s37, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s36
+; GCN2-NEXT:    v_mov_b32_e32 v1, s37
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    flat_load_dword v3, v[0:1]
+; GCN2-NEXT:    flat_load_dword v2, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB84_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB84_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i64_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB84_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB84_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw umin ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_umin_i64_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    s_add_u32 s34, s4, 4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    flat_load_dword v1, v[2:3]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB85_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s5
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB85_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i64_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    s_add_u32 s34, s4, 4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    flat_load_dword v1, v[2:3]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB85_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s5
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB85_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i64_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB85_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB85_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umin ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_umin_i64_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    s_add_u32 s36, s4, 36
+; GCN1-NEXT:    s_addc_u32 s37, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s36
+; GCN1-NEXT:    v_mov_b32_e32 v1, s37
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    flat_load_dword v0, v[2:3]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB86_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB86_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_umin_i64_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    s_add_u32 s36, s4, 36
+; GCN2-NEXT:    s_addc_u32 s37, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s36
+; GCN2-NEXT:    v_mov_b32_e32 v1, s37
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    flat_load_dword v0, v[2:3]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB86_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB86_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_umin_i64_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB86_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB86_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw umin ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw min
+; ---------------------------------------------------------------------
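+;
+; No single system-scope hardware atomic is used here; each signed min below
+; is expanded into a compare-and-swap loop (the %atomicrmw.start /
+; %atomicrmw.end blocks visible in the checks). A minimal sketch of the
+; expanded IR shape, with illustrative value names rather than the exact
+; AtomicExpand output:
+;
+;     %start = load i64, ptr %ptr
+;     br label %atomicrmw.start
+;   atomicrmw.start:
+;     %loaded = phi i64 [ %start, %entry ], [ %newloaded, %atomicrmw.start ]
+;     %keep = icmp sle i64 %loaded, %in
+;     %new = select i1 %keep, i64 %loaded, i64 %in
+;     %pair = cmpxchg ptr %ptr, i64 %loaded, i64 %new seq_cst seq_cst
+;     %newloaded = extractvalue { i64, i1 } %pair, 0
+;     %success = extractvalue { i64, i1 } %pair, 1
+;     br i1 %success, label %atomicrmw.end, label %atomicrmw.start
+;   atomicrmw.end: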
+
+define void @flat_atomic_min_i64_noret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_min_i64_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v4, vcc, 4, v0
+; GCN1-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v6, v[0:1]
+; GCN1-NEXT:    flat_load_dword v7, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB87_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v7, v5
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v6, v4
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB87_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i64_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v4, vcc, 4, v0
+; GCN2-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v6, v[0:1]
+; GCN2-NEXT:    flat_load_dword v7, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB87_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v7, v5
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v6, v4
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB87_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i64_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB87_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB87_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw min ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_min_i64_noret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_min_i64_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v8, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v9, vcc, 0, v1, vcc
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 36, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v7, v[0:1]
+; GCN1-NEXT:    flat_load_dword v6, v[8:9]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB88_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v7, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v6, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB88_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i64_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v8, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v9, vcc, 0, v1, vcc
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 36, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v7, v[0:1]
+; GCN2-NEXT:    flat_load_dword v6, v[8:9]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB88_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v7, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v6, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB88_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i64_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[6:7], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB88_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB88_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw min ptr %gep, i64 %in seq_cst
+  ret void
+}
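+
+; Note the offset handling above: gfx900 (GCN3) folds the 32-byte
+; displacement from the getelementptr into the flat instruction's immediate
+; ("offset:32") and loads both halves with a single flat_load_dwordx2, while
+; bonaire/tonga (GCN1/GCN2) must materialize the address with a 64-bit
+; add/addc carry chain and load the two dwords separately.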
+
+define i64 @flat_atomic_min_i64_ret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_min_i64_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v5, vcc, 4, v0
+; GCN1-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v4, v[0:1]
+; GCN1-NEXT:    flat_load_dword v5, v[5:6]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB89_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v7, v5
+; GCN1-NEXT:    v_mov_b32_e32 v6, v4
+; GCN1-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB89_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v0, v4
+; GCN1-NEXT:    v_mov_b32_e32 v1, v5
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i64_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v5, vcc, 4, v0
+; GCN2-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v4, v[0:1]
+; GCN2-NEXT:    flat_load_dword v5, v[5:6]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB89_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v7, v5
+; GCN2-NEXT:    v_mov_b32_e32 v6, v4
+; GCN2-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB89_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v0, v4
+; GCN2-NEXT:    v_mov_b32_e32 v1, v5
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i64_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[4:5], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB89_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB89_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v4
+; GCN3-NEXT:    v_mov_b32_e32 v1, v5
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw min ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @flat_atomic_min_i64_ret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_min_i64_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v4, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 36, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    flat_load_dword v0, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB90_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v9, v1
+; GCN1-NEXT:    v_mov_b32_e32 v8, v0
+; GCN1-NEXT:    v_cmp_le_i64_e32 vcc, v[8:9], v[2:3]
+; GCN1-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB90_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i64_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v4, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 36, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    flat_load_dword v0, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB90_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v9, v1
+; GCN2-NEXT:    v_mov_b32_e32 v8, v0
+; GCN2-NEXT:    v_cmp_le_i64_e32 vcc, v[8:9], v[2:3]
+; GCN2-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB90_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i64_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_load_dwordx2 v[4:5], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:  .LBB90_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v7, v5
+; GCN3-NEXT:    v_mov_b32_e32 v6, v4
+; GCN3-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GCN3-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB90_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v0, v4
+; GCN3-NEXT:    v_mov_b32_e32 v1, v5
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw min ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @flat_atomic_min_i64_noret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_min_i64_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    s_add_u32 s34, s4, 4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v3, s34
+; GCN1-NEXT:    v_mov_b32_e32 v4, s35
+; GCN1-NEXT:    flat_load_dword v2, v[0:1]
+; GCN1-NEXT:    flat_load_dword v3, v[3:4]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB91_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s5
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB91_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i64_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    s_add_u32 s34, s4, 4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v3, s34
+; GCN2-NEXT:    v_mov_b32_e32 v4, s35
+; GCN2-NEXT:    flat_load_dword v2, v[0:1]
+; GCN2-NEXT:    flat_load_dword v3, v[3:4]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB91_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s5
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB91_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i64_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB91_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB91_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw min ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_min_i64_noret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_min_i64_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    s_add_u32 s36, s4, 36
+; GCN1-NEXT:    s_addc_u32 s37, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s36
+; GCN1-NEXT:    v_mov_b32_e32 v1, s37
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    flat_load_dword v3, v[0:1]
+; GCN1-NEXT:    flat_load_dword v2, v[4:5]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB92_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB92_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i64_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    s_add_u32 s36, s4, 36
+; GCN2-NEXT:    s_addc_u32 s37, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s36
+; GCN2-NEXT:    v_mov_b32_e32 v1, s37
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    flat_load_dword v3, v[0:1]
+; GCN2-NEXT:    flat_load_dword v2, v[4:5]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB92_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB92_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i64_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB92_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB92_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw min ptr %gep, i64 %in seq_cst
+  ret void
+}
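+
+; In the scalar variants the compare can read the s[6:7] input directly, but
+; flat_atomic_cmpswap_x2 takes its address and data operands in VGPRs, so the
+; loop re-materializes the address in v[4:5] and the compare value from the
+; SGPR inputs on every iteration; v[0:1] are clobbered by the returned value
+; each time around.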
+
+define amdgpu_gfx i64 @flat_atomic_min_i64_ret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_min_i64_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s4
+; GCN1-NEXT:    s_add_u32 s34, s4, 4
+; GCN1-NEXT:    v_mov_b32_e32 v1, s5
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    flat_load_dword v0, v[0:1]
+; GCN1-NEXT:    flat_load_dword v1, v[2:3]
+; GCN1-NEXT:    s_mov_b64 s[34:35], 0
+; GCN1-NEXT:  .LBB93_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s5
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_cbranch_execnz .LBB93_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i64_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s4
+; GCN2-NEXT:    s_add_u32 s34, s4, 4
+; GCN2-NEXT:    v_mov_b32_e32 v1, s5
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    flat_load_dword v0, v[0:1]
+; GCN2-NEXT:    flat_load_dword v1, v[2:3]
+; GCN2-NEXT:    s_mov_b64 s[34:35], 0
+; GCN2-NEXT:  .LBB93_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s5
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_cbranch_execnz .LBB93_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i64_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB93_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB93_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw min ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @flat_atomic_min_i64_ret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_min_i64_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    s_add_u32 s36, s4, 36
+; GCN1-NEXT:    s_addc_u32 s37, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s36
+; GCN1-NEXT:    v_mov_b32_e32 v1, s37
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    flat_load_dword v1, v[0:1]
+; GCN1-NEXT:    flat_load_dword v0, v[2:3]
+; GCN1-NEXT:    s_mov_b64 s[36:37], 0
+; GCN1-NEXT:  .LBB94_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s7
+; GCN1-NEXT:    v_mov_b32_e32 v6, s6
+; GCN1-NEXT:    v_mov_b32_e32 v4, s34
+; GCN1-NEXT:    v_mov_b32_e32 v5, s35
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_cbranch_execnz .LBB94_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_min_i64_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    s_add_u32 s36, s4, 36
+; GCN2-NEXT:    s_addc_u32 s37, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s36
+; GCN2-NEXT:    v_mov_b32_e32 v1, s37
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    flat_load_dword v1, v[0:1]
+; GCN2-NEXT:    flat_load_dword v0, v[2:3]
+; GCN2-NEXT:    s_mov_b64 s[36:37], 0
+; GCN2-NEXT:  .LBB94_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s7
+; GCN2-NEXT:    v_mov_b32_e32 v6, s6
+; GCN2-NEXT:    v_mov_b32_e32 v4, s34
+; GCN2-NEXT:    v_mov_b32_e32 v5, s35
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_cbranch_execnz .LBB94_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[36:37]
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_min_i64_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s4
+; GCN3-NEXT:    v_mov_b32_e32 v1, s5
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[34:35], 0
+; GCN3-NEXT:  .LBB94_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v4, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s5
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_cbranch_execnz .LBB94_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw min ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_kernel void @atomic_min_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_min_i64_addr64_offset:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT:    s_add_u32 s0, s0, s4
+; GCN1-NEXT:    s_addc_u32 s1, s1, s5
+; GCN1-NEXT:    s_add_u32 s0, s0, 32
+; GCN1-NEXT:    s_addc_u32 s1, s1, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:  .LBB95_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s3
+; GCN1-NEXT:    v_mov_b32_e32 v6, s2
+; GCN1-NEXT:    v_mov_b32_e32 v5, s1
+; GCN1-NEXT:    v_mov_b32_e32 v4, s0
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB95_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_min_i64_addr64_offset:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT:    s_add_u32 s0, s0, s4
+; GCN2-NEXT:    s_addc_u32 s1, s1, s5
+; GCN2-NEXT:    s_add_u32 s0, s0, 32
+; GCN2-NEXT:    s_addc_u32 s1, s1, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:  .LBB95_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s3
+; GCN2-NEXT:    v_mov_b32_e32 v6, s2
+; GCN2-NEXT:    v_mov_b32_e32 v5, s1
+; GCN2-NEXT:    v_mov_b32_e32 v4, s0
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB95_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_min_i64_addr64_offset:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
+; GCN3-NEXT:    s_add_u32 s0, s4, s0
+; GCN3-NEXT:    s_addc_u32 s1, s5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[2:3], 0
+; GCN3-NEXT:  .LBB95_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s7
+; GCN3-NEXT:    v_mov_b32_e32 v6, s6
+; GCN3-NEXT:    v_mov_b32_e32 v5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v4, s0
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT:    s_cbranch_execnz .LBB95_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr %out, i64 %index
+  %gep = getelementptr i64, ptr %ptr, i64 4
+  %tmp0 = atomicrmw min ptr %gep, i64 %in seq_cst
+  ret void
+}
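+
+; Address math for the addr64 kernels: %index is scaled by the i64 element
+; size with "s_lshl_b64 ..., 3" (i.e. index * 8), and the trailing
+; "getelementptr i64, ptr %ptr, i64 4" contributes the extra 32 bytes, which
+; GCN3 again folds into the instruction offset while GCN1/GCN2 add it into
+; the scalar address.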
+
+define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_min_i64_ret_addr64_offset:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT:    s_add_u32 s0, s0, s6
+; GCN1-NEXT:    s_addc_u32 s1, s1, s7
+; GCN1-NEXT:    s_add_u32 s0, s0, 32
+; GCN1-NEXT:    s_addc_u32 s1, s1, 0
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[6:7], 0
+; GCN1-NEXT:  .LBB96_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s5
+; GCN1-NEXT:    v_mov_b32_e32 v6, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s1
+; GCN1-NEXT:    v_mov_b32_e32 v4, s0
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    s_cbranch_execnz .LBB96_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v2, s2
+; GCN1-NEXT:    v_mov_b32_e32 v3, s3
+; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_min_i64_ret_addr64_offset:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT:    s_add_u32 s0, s0, s6
+; GCN2-NEXT:    s_addc_u32 s1, s1, s7
+; GCN2-NEXT:    s_add_u32 s0, s0, 32
+; GCN2-NEXT:    s_addc_u32 s1, s1, 0
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[6:7], 0
+; GCN2-NEXT:  .LBB96_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s5
+; GCN2-NEXT:    v_mov_b32_e32 v6, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s1
+; GCN2-NEXT:    v_mov_b32_e32 v4, s0
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    s_cbranch_execnz .LBB96_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v2, s2
+; GCN2-NEXT:    v_mov_b32_e32 v3, s3
+; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_min_i64_ret_addr64_offset:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN3-NEXT:    s_add_u32 s0, s0, s6
+; GCN3-NEXT:    s_addc_u32 s1, s1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] offset:32
+; GCN3-NEXT:    s_mov_b64 s[6:7], 0
+; GCN3-NEXT:  .LBB96_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s5
+; GCN3-NEXT:    v_mov_b32_e32 v6, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v4, s0
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN3-NEXT:    s_cbranch_execnz .LBB96_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v2, s2
+; GCN3-NEXT:    v_mov_b32_e32 v3, s3
+; GCN3-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr %out, i64 %index
+  %gep = getelementptr i64, ptr %ptr, i64 4
+  %tmp0 = atomicrmw min ptr %gep, i64 %in seq_cst
+  store i64 %tmp0, ptr %out2
+  ret void
+}
+
+define amdgpu_kernel void @atomic_min_i64(ptr %out, i64 %in) {
+; GCN1-LABEL: atomic_min_i64:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT:    s_mov_b64 s[4:5], 0
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT:  .LBB97_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s3
+; GCN1-NEXT:    v_mov_b32_e32 v6, s2
+; GCN1-NEXT:    v_mov_b32_e32 v5, s1
+; GCN1-NEXT:    v_mov_b32_e32 v4, s0
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT:    s_cbranch_execnz .LBB97_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_min_i64:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT:    s_mov_b64 s[4:5], 0
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT:  .LBB97_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s3
+; GCN2-NEXT:    v_mov_b32_e32 v6, s2
+; GCN2-NEXT:    v_mov_b32_e32 v5, s1
+; GCN2-NEXT:    v_mov_b32_e32 v4, s0
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT:    s_cbranch_execnz .LBB97_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_min_i64:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN3-NEXT:    s_mov_b64 s[4:5], 0
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; GCN3-NEXT:  .LBB97_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s3
+; GCN3-NEXT:    v_mov_b32_e32 v6, s2
+; GCN3-NEXT:    v_mov_b32_e32 v5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v4, s0
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GCN3-NEXT:    s_cbranch_execnz .LBB97_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_endpgm
+entry:
+  %tmp0 = atomicrmw min ptr %out, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_min_i64_ret_addr64:
+; GCN1:       ; %bb.0: ; %entry
+; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN1-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT:    s_add_u32 s0, s0, s6
+; GCN1-NEXT:    s_addc_u32 s1, s1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v0, s0
+; GCN1-NEXT:    v_mov_b32_e32 v1, s1
+; GCN1-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT:    s_mov_b64 s[6:7], 0
+; GCN1-NEXT:  .LBB98_1: ; %atomicrmw.start
+; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v3, v1
+; GCN1-NEXT:    v_mov_b32_e32 v2, v0
+; GCN1-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT:    v_mov_b32_e32 v0, s5
+; GCN1-NEXT:    v_mov_b32_e32 v6, s4
+; GCN1-NEXT:    v_mov_b32_e32 v5, s1
+; GCN1-NEXT:    v_mov_b32_e32 v4, s0
+; GCN1-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    s_cbranch_execnz .LBB98_1
+; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT:    v_mov_b32_e32 v2, s2
+; GCN1-NEXT:    v_mov_b32_e32 v3, s3
+; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_endpgm
+;
+; GCN2-LABEL: atomic_min_i64_ret_addr64:
+; GCN2:       ; %bb.0: ; %entry
+; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN2-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT:    s_add_u32 s0, s0, s6
+; GCN2-NEXT:    s_addc_u32 s1, s1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v0, s0
+; GCN2-NEXT:    v_mov_b32_e32 v1, s1
+; GCN2-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT:    s_mov_b64 s[6:7], 0
+; GCN2-NEXT:  .LBB98_1: ; %atomicrmw.start
+; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v3, v1
+; GCN2-NEXT:    v_mov_b32_e32 v2, v0
+; GCN2-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT:    v_mov_b32_e32 v0, s5
+; GCN2-NEXT:    v_mov_b32_e32 v6, s4
+; GCN2-NEXT:    v_mov_b32_e32 v5, s1
+; GCN2-NEXT:    v_mov_b32_e32 v4, s0
+; GCN2-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    s_cbranch_execnz .LBB98_1
+; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT:    v_mov_b32_e32 v2, s2
+; GCN2-NEXT:    v_mov_b32_e32 v3, s3
+; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_endpgm
+;
+; GCN3-LABEL: atomic_min_i64_ret_addr64:
+; GCN3:       ; %bb.0: ; %entry
+; GCN3-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN3-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GCN3-NEXT:    s_add_u32 s0, s0, s6
+; GCN3-NEXT:    s_addc_u32 s1, s1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v0, s0
+; GCN3-NEXT:    v_mov_b32_e32 v1, s1
+; GCN3-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN3-NEXT:    s_mov_b64 s[6:7], 0
+; GCN3-NEXT:  .LBB98_1: ; %atomicrmw.start
+; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v3, v1
+; GCN3-NEXT:    v_mov_b32_e32 v2, v0
+; GCN3-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; GCN3-NEXT:    v_mov_b32_e32 v0, s5
+; GCN3-NEXT:    v_mov_b32_e32 v6, s4
+; GCN3-NEXT:    v_mov_b32_e32 v5, s1
+; GCN3-NEXT:    v_mov_b32_e32 v4, s0
+; GCN3-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN3-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN3-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GCN3-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GCN3-NEXT:    s_cbranch_execnz .LBB98_1
+; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN3-NEXT:    v_mov_b32_e32 v2, s2
+; GCN3-NEXT:    v_mov_b32_e32 v3, s3
+; GCN3-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr %out, i64 %index
+  %tmp0 = atomicrmw min ptr %ptr, i64 %in seq_cst
+  store i64 %tmp0, ptr %out2
+  ret void
+}
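+; There is no single system-scope 64-bit min instruction used here; the
+; atomicrmw is expanded into the compare-and-swap loop visible above
+; (flat_load, v_cmp/v_cndmask to form the minimum, flat_atomic_cmpswap_x2,
+; retry until the swap observed the expected value). Roughly, each
+; iteration does the following at the IR level (names are illustrative,
+; not taken from this test):
+;
+;   %cmp  = icmp sle i64 %in, %old              ; signed minimum select
+;   %new  = select i1 %cmp, i64 %in, i64 %old
+;   %pair = cmpxchg ptr %ptr, i64 %old, i64 %new seq_cst seq_cst
+;   %old2 = extractvalue { i64, i1 } %pair, 0   ; value actually in memory
+;   %ok   = extractvalue { i64, i1 } %pair, 1   ; loop again while !%ok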
+
+; ---------------------------------------------------------------------
+; atomicrmw uinc_wrap
+; ---------------------------------------------------------------------
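+; uinc_wrap is an unsigned increment that wraps to zero once the stored
+; value reaches the operand: new = (old uge %in) ? 0 : (old + 1). It maps
+; directly onto the hardware flat_atomic_inc(_x2) instructions checked
+; below. A minimal sketch of the operation (function name illustrative):
+;
+;   define i64 @uinc_wrap_sketch(ptr %p, i64 %bound) {
+;     %old = atomicrmw uinc_wrap ptr %p, i64 %bound seq_cst
+;     ret i64 %old
+;   }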
+
+define void @flat_atomic_uinc_wrap_i64_noret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i64_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i64_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i64_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw uinc_wrap ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_uinc_wrap_i64_noret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i64_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i64_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i64_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw uinc_wrap ptr %gep, i64 %in seq_cst
+  ret void
+}
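+; The 32-byte offset above comes from indexing i64 element 4 (4 x 8
+; bytes). gfx900 (GCN3) folds it into the instruction's immediate
+; offset:32 field, while the older targets must first materialize the
+; address with a 64-bit v_add/v_addc carry pair.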
+
+define i64 @flat_atomic_uinc_wrap_i64_ret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i64_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i64_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i64_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw uinc_wrap ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
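+; The glc modifier distinguishes this value-returning form from the noret
+; form above: with glc set, the atomic writes the pre-operation memory
+; value back into the destination VGPRs so it can be returned.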
+
+define i64 @flat_atomic_uinc_wrap_i64_ret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i64_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i64_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i64_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw uinc_wrap ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @flat_atomic_uinc_wrap_i64_noret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i64_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i64_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i64_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc_x2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw uinc_wrap ptr %ptr, i64 %in seq_cst
+  ret void
+}
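+; In the amdgpu_gfx calling convention the inreg arguments arrive in
+; SGPRs (here the pointer in s[4:5] and the value in s[6:7]), so these
+; "scalar" variants first copy them into VGPRs with v_mov_b32 before
+; issuing the VMEM atomic; the return address is in s[30:31] for
+; s_setpc_b64.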
+
+define amdgpu_gfx void @flat_atomic_uinc_wrap_i64_noret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i64_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i64_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i64_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc_x2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw uinc_wrap ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i64_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i64_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i64_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw uinc_wrap ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_uinc_wrap_i64_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_uinc_wrap_i64_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_uinc_wrap_i64_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw uinc_wrap ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw udec_wrap
+; ---------------------------------------------------------------------
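+; udec_wrap is the decrementing counterpart: it wraps back up to the
+; operand when the stored value is zero or already above it:
+; new = ((old == 0) || (old ugt %in)) ? %in : (old - 1). It lowers to the
+; native flat_atomic_dec(_x2) instructions checked below; sketch with an
+; illustrative name:
+;
+;   define i64 @udec_wrap_sketch(ptr %p, i64 %bound) {
+;     %old = atomicrmw udec_wrap ptr %p, i64 %bound seq_cst
+;     ret i64 %old
+;   }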
+
+define void @flat_atomic_udec_wrap_i64_noret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i64_noret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i64_noret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i64_noret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw udec_wrap ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @flat_atomic_udec_wrap_i64_noret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i64_noret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i64_noret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i64_noret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw udec_wrap ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @flat_atomic_udec_wrap_i64_ret(ptr %ptr, i64 %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i64_ret:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i64_ret:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i64_ret:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw udec_wrap ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @flat_atomic_udec_wrap_i64_ret_offset(ptr %out, i64 %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i64_ret_offset:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GCN1-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i64_ret_offset:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GCN2-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i64_ret_offset:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw udec_wrap ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @flat_atomic_udec_wrap_i64_noret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i64_noret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i64_noret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i64_noret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec_x2 v[2:3], v[0:1]
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw udec_wrap ptr %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @flat_atomic_udec_wrap_i64_noret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i64_noret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec_x2 v[2:3], v[0:1]
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i64_noret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec_x2 v[2:3], v[0:1]
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i64_noret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec_x2 v[2:3], v[0:1] offset:32
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %tmp0 = atomicrmw udec_wrap ptr %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_scalar(ptr inreg %ptr, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i64_ret_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v2, s4
+; GCN1-NEXT:    v_mov_b32_e32 v3, s5
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i64_ret_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v2, s4
+; GCN2-NEXT:    v_mov_b32_e32 v3, s5
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i64_ret_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw udec_wrap ptr %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_offset_scalar(ptr inreg %out, i64 inreg %in) {
+; GCN1-LABEL: flat_atomic_udec_wrap_i64_ret_offset_scalar:
+; GCN1:       ; %bb.0:
+; GCN1-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_add_u32 s34, s4, 32
+; GCN1-NEXT:    s_addc_u32 s35, s5, 0
+; GCN1-NEXT:    v_mov_b32_e32 v2, s34
+; GCN1-NEXT:    v_mov_b32_e32 v0, s6
+; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    v_mov_b32_e32 v3, s35
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN2-LABEL: flat_atomic_udec_wrap_i64_ret_offset_scalar:
+; GCN2:       ; %bb.0:
+; GCN2-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_add_u32 s34, s4, 32
+; GCN2-NEXT:    s_addc_u32 s35, s5, 0
+; GCN2-NEXT:    v_mov_b32_e32 v2, s34
+; GCN2-NEXT:    v_mov_b32_e32 v0, s6
+; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    v_mov_b32_e32 v3, s35
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
+; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN3-LABEL: flat_atomic_udec_wrap_i64_ret_offset_scalar:
+; GCN3:       ; %bb.0:
+; GCN3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    v_mov_b32_e32 v0, s6
+; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    v_mov_b32_e32 v2, s4
+; GCN3-NEXT:    v_mov_b32_e32 v3, s5
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] offset:32 glc
+; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr %out, i64 4
+  %result = atomicrmw udec_wrap ptr %gep, i64 %in seq_cst
+  ret i64 %result
+}

diff  --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_min_max_system.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_min_max_system.ll
deleted file mode 100644
index 301e104b58b6a1..00000000000000
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_min_max_system.ll
+++ /dev/null
@@ -1,3252 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN1 %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN2 %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN3 %s
-
-define amdgpu_kernel void @atomic_max_i32_offset(ptr %out, i32 %in) {
-; GCN1-LABEL: atomic_max_i32_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s0, s2, 16
-; GCN1-NEXT:    s_addc_u32 s1, s3, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB0_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_max_i32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB0_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i32_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s0, s2, 16
-; GCN2-NEXT:    s_addc_u32 s1, s3, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB0_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_max_i32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB0_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_max_i32_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s2
-; GCN3-NEXT:    v_mov_b32_e32 v1, s3
-; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT:  .LBB0_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_max_i32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s3
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB0_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr %out, i32 4
-  %val = atomicrmw volatile max ptr %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32_ret_offset(ptr %out, ptr %out2, i32 %in) {
-; GCN1-LABEL: atomic_max_i32_ret_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s8, s[0:1], 0xd
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s0, s4, 16
-; GCN1-NEXT:    s_addc_u32 s1, s5, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:  .LBB1_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    v_max_i32_e32 v0, s8, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB1_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i32_ret_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s0, s4, 16
-; GCN2-NEXT:    s_addc_u32 s1, s5, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:  .LBB1_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    v_max_i32_e32 v0, s8, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB1_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_max_i32_ret_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s4
-; GCN3-NEXT:    v_mov_b32_e32 v1, s5
-; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT:  .LBB1_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    v_mov_b32_e32 v3, s5
-; GCN3-NEXT:    v_max_i32_e32 v0, s2, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB1_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr %out, i32 4
-  %val = atomicrmw volatile max ptr %gep, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32_addr64_offset(ptr %out, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_max_i32_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s6, s0
-; GCN1-NEXT:    s_addc_u32 s1, s7, s1
-; GCN1-NEXT:    s_add_u32 s0, s0, 16
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB2_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_max_i32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB2_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i32_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s6, s0
-; GCN2-NEXT:    s_addc_u32 s1, s7, s1
-; GCN2-NEXT:    s_add_u32 s0, s0, 16
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB2_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_max_i32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB2_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_max_i32_addr64_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN3-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s6, s0
-; GCN3-NEXT:    s_addc_u32 s1, s7, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB2_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_max_i32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB2_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
-  %val = atomicrmw volatile max ptr %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(ptr %out, ptr %out2, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_max_i32_ret_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s8, s[0:1], 0xd
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s4, s0
-; GCN1-NEXT:    s_addc_u32 s1, s5, s1
-; GCN1-NEXT:    s_add_u32 s0, s0, 16
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB3_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    v_max_i32_e32 v0, s8, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB3_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i32_ret_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s4, s0
-; GCN2-NEXT:    s_addc_u32 s1, s5, s1
-; GCN2-NEXT:    s_add_u32 s0, s0, 16
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB3_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    v_max_i32_e32 v0, s8, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB3_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_max_i32_ret_addr64_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s4, s0
-; GCN3-NEXT:    s_addc_u32 s1, s5, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB3_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    v_max_i32_e32 v0, s8, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB3_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
-  %val = atomicrmw volatile max ptr %gep, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32(ptr %out, i32 %in) {
-; GCN1-LABEL: atomic_max_i32:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_mov_b64 s[0:1], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s2
-; GCN1-NEXT:    v_mov_b32_e32 v1, s3
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:  .LBB4_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_max_i32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s3
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN1-NEXT:    s_cbranch_execnz .LBB4_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i32:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_mov_b64 s[0:1], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s2
-; GCN2-NEXT:    v_mov_b32_e32 v1, s3
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:  .LBB4_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_max_i32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s3
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN2-NEXT:    s_cbranch_execnz .LBB4_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_max_i32:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s2
-; GCN3-NEXT:    v_mov_b32_e32 v1, s3
-; GCN3-NEXT:    flat_load_dword v1, v[0:1]
-; GCN3-NEXT:  .LBB4_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_max_i32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s3
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB4_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile max ptr %out, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32_ret(ptr %out, ptr %out2, i32 %in) {
-; GCN1-LABEL: atomic_max_i32_ret:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s2, s[0:1], 0xd
-; GCN1-NEXT:    s_mov_b64 s[0:1], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s4
-; GCN1-NEXT:    v_mov_b32_e32 v1, s5
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:  .LBB5_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v2, s4
-; GCN1-NEXT:    v_mov_b32_e32 v3, s5
-; GCN1-NEXT:    v_max_i32_e32 v0, s2, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN1-NEXT:    s_cbranch_execnz .LBB5_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i32_ret:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GCN2-NEXT:    s_mov_b64 s[0:1], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s4
-; GCN2-NEXT:    v_mov_b32_e32 v1, s5
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:  .LBB5_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v2, s4
-; GCN2-NEXT:    v_mov_b32_e32 v3, s5
-; GCN2-NEXT:    v_max_i32_e32 v0, s2, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN2-NEXT:    s_cbranch_execnz .LBB5_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_max_i32_ret:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s4
-; GCN3-NEXT:    v_mov_b32_e32 v1, s5
-; GCN3-NEXT:    flat_load_dword v0, v[0:1]
-; GCN3-NEXT:  .LBB5_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    v_mov_b32_e32 v3, s5
-; GCN3-NEXT:    v_max_i32_e32 v0, s2, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB5_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile max ptr %out, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32_addr64(ptr %out, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_max_i32_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s6, s0
-; GCN1-NEXT:    s_addc_u32 s1, s7, s1
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB6_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_max_i32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB6_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i32_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s6, s0
-; GCN2-NEXT:    s_addc_u32 s1, s7, s1
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB6_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_max_i32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB6_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_max_i32_addr64:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN3-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s6, s0
-; GCN3-NEXT:    s_addc_u32 s1, s7, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v1, v[0:1]
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB6_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_max_i32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB6_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %val = atomicrmw volatile max ptr %ptr, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32_ret_addr64(ptr %out, ptr %out2, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_max_i32_ret_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s8, s[0:1], 0xd
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s4, s0
-; GCN1-NEXT:    s_addc_u32 s1, s5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB7_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    v_max_i32_e32 v0, s8, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB7_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_max_i32_ret_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s4, s0
-; GCN2-NEXT:    s_addc_u32 s1, s5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB7_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    v_max_i32_e32 v0, s8, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB7_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_max_i32_ret_addr64:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s4, s0
-; GCN3-NEXT:    s_addc_u32 s1, s5, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v0, v[0:1]
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB7_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    v_max_i32_e32 v0, s8, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB7_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %val = atomicrmw volatile max ptr %ptr, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_offset(ptr %out, i32 %in) {
-; GCN1-LABEL: atomic_umax_i32_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s0, s2, 16
-; GCN1-NEXT:    s_addc_u32 s1, s3, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB8_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_max_u32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB8_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i32_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s0, s2, 16
-; GCN2-NEXT:    s_addc_u32 s1, s3, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB8_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_max_u32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB8_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umax_i32_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s2
-; GCN3-NEXT:    v_mov_b32_e32 v1, s3
-; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT:  .LBB8_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_max_u32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s3
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB8_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr %out, i32 4
-  %val = atomicrmw volatile umax ptr %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_ret_offset(ptr %out, ptr %out2, i32 %in) {
-; GCN1-LABEL: atomic_umax_i32_ret_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s8, s[0:1], 0xd
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s0, s4, 16
-; GCN1-NEXT:    s_addc_u32 s1, s5, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:  .LBB9_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    v_max_u32_e32 v0, s8, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB9_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i32_ret_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s0, s4, 16
-; GCN2-NEXT:    s_addc_u32 s1, s5, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:  .LBB9_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    v_max_u32_e32 v0, s8, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB9_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umax_i32_ret_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s4
-; GCN3-NEXT:    v_mov_b32_e32 v1, s5
-; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT:  .LBB9_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    v_mov_b32_e32 v3, s5
-; GCN3-NEXT:    v_max_u32_e32 v0, s2, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB9_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr %out, i32 4
-  %val = atomicrmw volatile umax ptr %gep, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_addr64_offset(ptr %out, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_umax_i32_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s6, s0
-; GCN1-NEXT:    s_addc_u32 s1, s7, s1
-; GCN1-NEXT:    s_add_u32 s0, s0, 16
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB10_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_max_u32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB10_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i32_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s6, s0
-; GCN2-NEXT:    s_addc_u32 s1, s7, s1
-; GCN2-NEXT:    s_add_u32 s0, s0, 16
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB10_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_max_u32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB10_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umax_i32_addr64_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN3-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s6, s0
-; GCN3-NEXT:    s_addc_u32 s1, s7, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB10_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_max_u32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB10_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
-  %val = atomicrmw volatile umax ptr %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(ptr %out, ptr %out2, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_umax_i32_ret_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s8, s[0:1], 0xd
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s4, s0
-; GCN1-NEXT:    s_addc_u32 s1, s5, s1
-; GCN1-NEXT:    s_add_u32 s0, s0, 16
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB11_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    v_max_u32_e32 v0, s8, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB11_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i32_ret_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s4, s0
-; GCN2-NEXT:    s_addc_u32 s1, s5, s1
-; GCN2-NEXT:    s_add_u32 s0, s0, 16
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB11_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    v_max_u32_e32 v0, s8, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB11_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umax_i32_ret_addr64_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s4, s0
-; GCN3-NEXT:    s_addc_u32 s1, s5, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB11_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    v_max_u32_e32 v0, s8, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB11_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
-  %val = atomicrmw volatile umax ptr %gep, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32(ptr %out, i32 %in) {
-; GCN1-LABEL: atomic_umax_i32:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_mov_b64 s[0:1], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s2
-; GCN1-NEXT:    v_mov_b32_e32 v1, s3
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:  .LBB12_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_max_u32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s3
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN1-NEXT:    s_cbranch_execnz .LBB12_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i32:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_mov_b64 s[0:1], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s2
-; GCN2-NEXT:    v_mov_b32_e32 v1, s3
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:  .LBB12_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_max_u32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s3
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN2-NEXT:    s_cbranch_execnz .LBB12_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umax_i32:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s2
-; GCN3-NEXT:    v_mov_b32_e32 v1, s3
-; GCN3-NEXT:    flat_load_dword v1, v[0:1]
-; GCN3-NEXT:  .LBB12_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_max_u32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s3
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB12_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile umax ptr %out, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_ret(ptr %out, ptr %out2, i32 %in) {
-; GCN1-LABEL: atomic_umax_i32_ret:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s2, s[0:1], 0xd
-; GCN1-NEXT:    s_mov_b64 s[0:1], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s4
-; GCN1-NEXT:    v_mov_b32_e32 v1, s5
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:  .LBB13_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v2, s4
-; GCN1-NEXT:    v_mov_b32_e32 v3, s5
-; GCN1-NEXT:    v_max_u32_e32 v0, s2, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN1-NEXT:    s_cbranch_execnz .LBB13_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i32_ret:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GCN2-NEXT:    s_mov_b64 s[0:1], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s4
-; GCN2-NEXT:    v_mov_b32_e32 v1, s5
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:  .LBB13_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v2, s4
-; GCN2-NEXT:    v_mov_b32_e32 v3, s5
-; GCN2-NEXT:    v_max_u32_e32 v0, s2, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN2-NEXT:    s_cbranch_execnz .LBB13_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umax_i32_ret:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s4
-; GCN3-NEXT:    v_mov_b32_e32 v1, s5
-; GCN3-NEXT:    flat_load_dword v0, v[0:1]
-; GCN3-NEXT:  .LBB13_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    v_mov_b32_e32 v3, s5
-; GCN3-NEXT:    v_max_u32_e32 v0, s2, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB13_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile umax ptr %out, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_addr64(ptr %out, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_umax_i32_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s6, s0
-; GCN1-NEXT:    s_addc_u32 s1, s7, s1
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB14_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_max_u32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB14_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i32_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s6, s0
-; GCN2-NEXT:    s_addc_u32 s1, s7, s1
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB14_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_max_u32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB14_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umax_i32_addr64:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN3-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s6, s0
-; GCN3-NEXT:    s_addc_u32 s1, s7, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v1, v[0:1]
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB14_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_max_u32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB14_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %val = atomicrmw volatile umax ptr %ptr, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_ret_addr64(ptr %out, ptr %out2, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_umax_i32_ret_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s8, s[0:1], 0xd
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s4, s0
-; GCN1-NEXT:    s_addc_u32 s1, s5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB15_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    v_max_u32_e32 v0, s8, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB15_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umax_i32_ret_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s4, s0
-; GCN2-NEXT:    s_addc_u32 s1, s5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB15_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    v_max_u32_e32 v0, s8, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB15_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umax_i32_ret_addr64:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s4, s0
-; GCN3-NEXT:    s_addc_u32 s1, s5, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v0, v[0:1]
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB15_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    v_max_u32_e32 v0, s8, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB15_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %val = atomicrmw volatile umax ptr %ptr, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_offset(ptr %out, i32 %in) {
-; GCN1-LABEL: atomic_min_i32_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s0, s2, 16
-; GCN1-NEXT:    s_addc_u32 s1, s3, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB16_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_min_i32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB16_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i32_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s0, s2, 16
-; GCN2-NEXT:    s_addc_u32 s1, s3, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB16_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_min_i32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB16_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_min_i32_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s2
-; GCN3-NEXT:    v_mov_b32_e32 v1, s3
-; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT:  .LBB16_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_min_i32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s3
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB16_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr %out, i32 4
-  %val = atomicrmw volatile min ptr %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_ret_offset(ptr %out, ptr %out2, i32 %in) {
-; GCN1-LABEL: atomic_min_i32_ret_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s8, s[0:1], 0xd
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s0, s4, 16
-; GCN1-NEXT:    s_addc_u32 s1, s5, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:  .LBB17_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    v_min_i32_e32 v0, s8, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB17_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i32_ret_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s0, s4, 16
-; GCN2-NEXT:    s_addc_u32 s1, s5, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:  .LBB17_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    v_min_i32_e32 v0, s8, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB17_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_min_i32_ret_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s4
-; GCN3-NEXT:    v_mov_b32_e32 v1, s5
-; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT:  .LBB17_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    v_mov_b32_e32 v3, s5
-; GCN3-NEXT:    v_min_i32_e32 v0, s2, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB17_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr %out, i32 4
-  %val = atomicrmw volatile min ptr %gep, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_addr64_offset(ptr %out, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_min_i32_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s6, s0
-; GCN1-NEXT:    s_addc_u32 s1, s7, s1
-; GCN1-NEXT:    s_add_u32 s0, s0, 16
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB18_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_min_i32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB18_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i32_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s6, s0
-; GCN2-NEXT:    s_addc_u32 s1, s7, s1
-; GCN2-NEXT:    s_add_u32 s0, s0, 16
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB18_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_min_i32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB18_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_min_i32_addr64_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN3-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s6, s0
-; GCN3-NEXT:    s_addc_u32 s1, s7, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB18_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_min_i32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB18_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
-  %val = atomicrmw volatile min ptr %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(ptr %out, ptr %out2, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_min_i32_ret_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s8, s[0:1], 0xd
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s4, s0
-; GCN1-NEXT:    s_addc_u32 s1, s5, s1
-; GCN1-NEXT:    s_add_u32 s0, s0, 16
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB19_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    v_min_i32_e32 v0, s8, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB19_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i32_ret_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s4, s0
-; GCN2-NEXT:    s_addc_u32 s1, s5, s1
-; GCN2-NEXT:    s_add_u32 s0, s0, 16
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB19_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    v_min_i32_e32 v0, s8, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB19_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_min_i32_ret_addr64_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s4, s0
-; GCN3-NEXT:    s_addc_u32 s1, s5, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB19_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    v_min_i32_e32 v0, s8, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB19_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
-  %val = atomicrmw volatile min ptr %gep, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32(ptr %out, i32 %in) {
-; GCN1-LABEL: atomic_min_i32:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_mov_b64 s[0:1], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s2
-; GCN1-NEXT:    v_mov_b32_e32 v1, s3
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:  .LBB20_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_min_i32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s3
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN1-NEXT:    s_cbranch_execnz .LBB20_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i32:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_mov_b64 s[0:1], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s2
-; GCN2-NEXT:    v_mov_b32_e32 v1, s3
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:  .LBB20_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_min_i32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s3
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN2-NEXT:    s_cbranch_execnz .LBB20_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_min_i32:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s2
-; GCN3-NEXT:    v_mov_b32_e32 v1, s3
-; GCN3-NEXT:    flat_load_dword v1, v[0:1]
-; GCN3-NEXT:  .LBB20_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_min_i32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s3
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB20_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile min ptr %out, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_ret(ptr %out, ptr %out2, i32 %in) {
-; GCN1-LABEL: atomic_min_i32_ret:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s2, s[0:1], 0xd
-; GCN1-NEXT:    s_mov_b64 s[0:1], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s4
-; GCN1-NEXT:    v_mov_b32_e32 v1, s5
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:  .LBB21_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v2, s4
-; GCN1-NEXT:    v_mov_b32_e32 v3, s5
-; GCN1-NEXT:    v_min_i32_e32 v0, s2, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN1-NEXT:    s_cbranch_execnz .LBB21_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i32_ret:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GCN2-NEXT:    s_mov_b64 s[0:1], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s4
-; GCN2-NEXT:    v_mov_b32_e32 v1, s5
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:  .LBB21_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v2, s4
-; GCN2-NEXT:    v_mov_b32_e32 v3, s5
-; GCN2-NEXT:    v_min_i32_e32 v0, s2, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN2-NEXT:    s_cbranch_execnz .LBB21_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_min_i32_ret:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s4
-; GCN3-NEXT:    v_mov_b32_e32 v1, s5
-; GCN3-NEXT:    flat_load_dword v0, v[0:1]
-; GCN3-NEXT:  .LBB21_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    v_mov_b32_e32 v3, s5
-; GCN3-NEXT:    v_min_i32_e32 v0, s2, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB21_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile min ptr %out, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_addr64(ptr %out, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_min_i32_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s6, s0
-; GCN1-NEXT:    s_addc_u32 s1, s7, s1
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB22_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_min_i32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB22_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i32_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s6, s0
-; GCN2-NEXT:    s_addc_u32 s1, s7, s1
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB22_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_min_i32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB22_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_min_i32_addr64:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN3-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s6, s0
-; GCN3-NEXT:    s_addc_u32 s1, s7, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v1, v[0:1]
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB22_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_min_i32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB22_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %val = atomicrmw volatile min ptr %ptr, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_ret_addr64(ptr %out, ptr %out2, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_min_i32_ret_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s8, s[0:1], 0xd
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s4, s0
-; GCN1-NEXT:    s_addc_u32 s1, s5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB23_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    v_min_i32_e32 v0, s8, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB23_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_min_i32_ret_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s4, s0
-; GCN2-NEXT:    s_addc_u32 s1, s5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB23_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    v_min_i32_e32 v0, s8, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB23_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_min_i32_ret_addr64:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s4, s0
-; GCN3-NEXT:    s_addc_u32 s1, s5, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v0, v[0:1]
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB23_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    v_min_i32_e32 v0, s8, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB23_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %val = atomicrmw volatile min ptr %ptr, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_offset(ptr %out, i32 %in) {
-; GCN1-LABEL: atomic_umin_i32_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s0, s2, 16
-; GCN1-NEXT:    s_addc_u32 s1, s3, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB24_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_min_u32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB24_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i32_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s0, s2, 16
-; GCN2-NEXT:    s_addc_u32 s1, s3, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB24_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_min_u32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB24_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umin_i32_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s2
-; GCN3-NEXT:    v_mov_b32_e32 v1, s3
-; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT:  .LBB24_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_min_u32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s3
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB24_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr %out, i32 4
-  %val = atomicrmw volatile umin ptr %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_ret_offset(ptr %out, ptr %out2, i32 %in) {
-; GCN1-LABEL: atomic_umin_i32_ret_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s8, s[0:1], 0xd
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_add_u32 s0, s4, 16
-; GCN1-NEXT:    s_addc_u32 s1, s5, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:  .LBB25_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    v_min_u32_e32 v0, s8, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB25_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i32_ret_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_add_u32 s0, s4, 16
-; GCN2-NEXT:    s_addc_u32 s1, s5, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:  .LBB25_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    v_min_u32_e32 v0, s8, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB25_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umin_i32_ret_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s4
-; GCN3-NEXT:    v_mov_b32_e32 v1, s5
-; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT:  .LBB25_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    v_mov_b32_e32 v3, s5
-; GCN3-NEXT:    v_min_u32_e32 v0, s2, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB25_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr %out, i32 4
-  %val = atomicrmw volatile umin ptr %gep, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_addr64_offset(ptr %out, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_umin_i32_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s6, s0
-; GCN1-NEXT:    s_addc_u32 s1, s7, s1
-; GCN1-NEXT:    s_add_u32 s0, s0, 16
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB26_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_min_u32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB26_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i32_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s6, s0
-; GCN2-NEXT:    s_addc_u32 s1, s7, s1
-; GCN2-NEXT:    s_add_u32 s0, s0, 16
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB26_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_min_u32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB26_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umin_i32_addr64_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN3-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s6, s0
-; GCN3-NEXT:    s_addc_u32 s1, s7, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB26_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_min_u32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB26_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
-  %val = atomicrmw volatile umin ptr %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(ptr %out, ptr %out2, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_umin_i32_ret_addr64_offset:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s8, s[0:1], 0xd
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s4, s0
-; GCN1-NEXT:    s_addc_u32 s1, s5, s1
-; GCN1-NEXT:    s_add_u32 s0, s0, 16
-; GCN1-NEXT:    s_addc_u32 s1, s1, 0
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB27_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    v_min_u32_e32 v0, s8, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB27_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i32_ret_addr64_offset:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s4, s0
-; GCN2-NEXT:    s_addc_u32 s1, s5, s1
-; GCN2-NEXT:    s_add_u32 s0, s0, 16
-; GCN2-NEXT:    s_addc_u32 s1, s1, 0
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB27_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    v_min_u32_e32 v0, s8, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB27_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umin_i32_ret_addr64_offset:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s4, s0
-; GCN3-NEXT:    s_addc_u32 s1, s5, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB27_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    v_min_u32_e32 v0, s8, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB27_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
-  %val = atomicrmw volatile umin ptr %gep, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32(ptr %out, i32 %in) {
-; GCN1-LABEL: atomic_umin_i32:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_mov_b64 s[0:1], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s2
-; GCN1-NEXT:    v_mov_b32_e32 v1, s3
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:  .LBB28_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_min_u32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s3
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN1-NEXT:    s_cbranch_execnz .LBB28_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i32:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_mov_b64 s[0:1], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s2
-; GCN2-NEXT:    v_mov_b32_e32 v1, s3
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:  .LBB28_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_min_u32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s3
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN2-NEXT:    s_cbranch_execnz .LBB28_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umin_i32:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s2
-; GCN3-NEXT:    v_mov_b32_e32 v1, s3
-; GCN3-NEXT:    flat_load_dword v1, v[0:1]
-; GCN3-NEXT:  .LBB28_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_min_u32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s3
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB28_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile umin ptr %out, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_ret(ptr %out, ptr %out2, i32 %in) {
-; GCN1-LABEL: atomic_umin_i32_ret:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s2, s[0:1], 0xd
-; GCN1-NEXT:    s_mov_b64 s[0:1], 0
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v0, s4
-; GCN1-NEXT:    v_mov_b32_e32 v1, s5
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:  .LBB29_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v2, s4
-; GCN1-NEXT:    v_mov_b32_e32 v3, s5
-; GCN1-NEXT:    v_min_u32_e32 v0, s2, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN1-NEXT:    s_cbranch_execnz .LBB29_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i32_ret:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GCN2-NEXT:    s_mov_b64 s[0:1], 0
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v0, s4
-; GCN2-NEXT:    v_mov_b32_e32 v1, s5
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:  .LBB29_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v2, s4
-; GCN2-NEXT:    v_mov_b32_e32 v3, s5
-; GCN2-NEXT:    v_min_u32_e32 v0, s2, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN2-NEXT:    s_cbranch_execnz .LBB29_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umin_i32_ret:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GCN3-NEXT:    s_mov_b64 s[0:1], 0
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v0, s4
-; GCN3-NEXT:    v_mov_b32_e32 v1, s5
-; GCN3-NEXT:    flat_load_dword v0, v[0:1]
-; GCN3-NEXT:  .LBB29_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    v_mov_b32_e32 v3, s5
-; GCN3-NEXT:    v_min_u32_e32 v0, s2, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    s_cbranch_execnz .LBB29_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile umin ptr %out, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_addr64(ptr %out, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_umin_i32_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; GCN1-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s4, s[0:1], 0xb
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s6, s0
-; GCN1-NEXT:    s_addc_u32 s1, s7, s1
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v1, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB30_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_min_u32_e32 v0, s4, v1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB30_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i32_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN2-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s6, s0
-; GCN2-NEXT:    s_addc_u32 s1, s7, s1
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v1, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB30_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_min_u32_e32 v0, s4, v1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB30_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umin_i32_addr64:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GCN3-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s6, s0
-; GCN3-NEXT:    s_addc_u32 s1, s7, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v1, v[0:1]
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB30_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_min_u32_e32 v0, s4, v1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB30_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %val = atomicrmw volatile umin ptr %ptr, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_ret_addr64(ptr %out, ptr %out2, i32 %in, i64 %index) {
-; GCN1-LABEL: atomic_umin_i32_ret_addr64:
-; GCN1:       ; %bb.0: ; %entry
-; GCN1-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN1-NEXT:    s_load_dword s8, s[0:1], 0xd
-; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN1-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN1-NEXT:    s_add_u32 s0, s4, s0
-; GCN1-NEXT:    s_addc_u32 s1, s5, s1
-; GCN1-NEXT:    v_mov_b32_e32 v0, s0
-; GCN1-NEXT:    v_mov_b32_e32 v1, s1
-; GCN1-NEXT:    flat_load_dword v0, v[0:1]
-; GCN1-NEXT:    s_mov_b64 s[2:3], 0
-; GCN1-NEXT:  .LBB31_1: ; %atomicrmw.start
-; GCN1-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    v_mov_b32_e32 v1, v0
-; GCN1-NEXT:    v_mov_b32_e32 v3, s1
-; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    v_min_u32_e32 v0, s8, v1
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
-; GCN1-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN1-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    s_cbranch_execnz .LBB31_1
-; GCN1-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN1-NEXT:    v_mov_b32_e32 v1, s6
-; GCN1-NEXT:    v_mov_b32_e32 v2, s7
-; GCN1-NEXT:    flat_store_dword v[1:2], v0
-; GCN1-NEXT:    s_endpgm
-;
-; GCN2-LABEL: atomic_umin_i32_ret_addr64:
-; GCN2:       ; %bb.0: ; %entry
-; GCN2-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN2-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN2-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN2-NEXT:    s_add_u32 s0, s4, s0
-; GCN2-NEXT:    s_addc_u32 s1, s5, s1
-; GCN2-NEXT:    v_mov_b32_e32 v0, s0
-; GCN2-NEXT:    v_mov_b32_e32 v1, s1
-; GCN2-NEXT:    flat_load_dword v0, v[0:1]
-; GCN2-NEXT:    s_mov_b64 s[2:3], 0
-; GCN2-NEXT:  .LBB31_1: ; %atomicrmw.start
-; GCN2-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    v_mov_b32_e32 v1, v0
-; GCN2-NEXT:    v_mov_b32_e32 v3, s1
-; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    v_min_u32_e32 v0, s8, v1
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
-; GCN2-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN2-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    s_cbranch_execnz .LBB31_1
-; GCN2-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN2-NEXT:    v_mov_b32_e32 v1, s6
-; GCN2-NEXT:    v_mov_b32_e32 v2, s7
-; GCN2-NEXT:    flat_store_dword v[1:2], v0
-; GCN2-NEXT:    s_endpgm
-;
-; GCN3-LABEL: atomic_umin_i32_ret_addr64:
-; GCN3:       ; %bb.0: ; %entry
-; GCN3-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GCN3-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GCN3-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN3-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GCN3-NEXT:    s_add_u32 s0, s4, s0
-; GCN3-NEXT:    s_addc_u32 s1, s5, s1
-; GCN3-NEXT:    v_mov_b32_e32 v0, s0
-; GCN3-NEXT:    v_mov_b32_e32 v1, s1
-; GCN3-NEXT:    flat_load_dword v0, v[0:1]
-; GCN3-NEXT:    s_mov_b64 s[2:3], 0
-; GCN3-NEXT:  .LBB31_1: ; %atomicrmw.start
-; GCN3-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    v_mov_b32_e32 v1, v0
-; GCN3-NEXT:    v_mov_b32_e32 v3, s1
-; GCN3-NEXT:    v_mov_b32_e32 v2, s0
-; GCN3-NEXT:    v_min_u32_e32 v0, s8, v1
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
-; GCN3-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GCN3-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    s_cbranch_execnz .LBB31_1
-; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GCN3-NEXT:    v_mov_b32_e32 v1, s6
-; GCN3-NEXT:    v_mov_b32_e32 v2, s7
-; GCN3-NEXT:    flat_store_dword v[1:2], v0
-; GCN3-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr %out, i64 %index
-  %val = atomicrmw volatile umin ptr %ptr, i32 %in seq_cst
-  store i32 %val, ptr %out2
-  ret void
-}

diff  --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
new file mode 100644
index 00000000000000..56104b87a02046
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
@@ -0,0 +1,7660 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s
+
+; ---------------------------------------------------------------------
+; atomicrmw xchg
+; ---------------------------------------------------------------------
+
+define void @global_atomic_xchg_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_xchg_i32_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i32_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap v[0:1], v2, off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xchg ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_xchg_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_xchg_i32_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i32_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i32_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap v[0:1], v2, off offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw xchg ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @global_atomic_xchg_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_xchg_i32_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i32_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i32_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap v0, v[0:1], v2, off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xchg ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @global_atomic_xchg_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_xchg_i32_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i32_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i32_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap v0, v[0:1], v2, off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw xchg ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @global_atomic_xchg_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_xchg_i32_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap v1, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i32_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i32_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap v0, v1, s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xchg ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
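+
+; The amdgpu_gfx "_scalar" variants take the pointer and data uniformly in
+; SGPRs. SI goes through a buffer descriptor built around the pointer and
+; preserves s6/s7 with v_writelane/v_readlane into a spilled VGPR; VI
+; copies the SGPRs into VGPRs for the flat form; GFX9 can use saddr
+; addressing (global_atomic_swap v0, v1, s[4:5]).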
+
+define amdgpu_gfx void @global_atomic_xchg_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_xchg_i32_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap v1, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i32_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i32_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap v0, v1, s[4:5] offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw xchg ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
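+
+; With a uniform base pointer, VI does the offset arithmetic on the scalar
+; ALU (s_add_u32/s_addc_u32) rather than the VALU adds used for the
+; VGPR-pointer tests above.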
+
+define amdgpu_gfx i32 @global_atomic_xchg_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_xchg_i32_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap v0, off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i32_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i32_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap v0, v0, v1, s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xchg ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @global_atomic_xchg_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_xchg_i32_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap v0, off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i32_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i32_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap v0, v0, v1, s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw xchg ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw add
+; ---------------------------------------------------------------------
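+; The add tests (and the sub and and sections below) follow the same
+; per-target pattern as the swap tests above, selecting the matching
+; native atomic: buffer_atomic_add on SI, flat_atomic_add on VI,
+; global_atomic_add on GFX9, each bracketed by s_waitcnt and a
+; buffer_wbinvl1(_vol) cache invalidate for the seq_cst system scope.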
+
+define void @global_atomic_add_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_add_i32_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i32_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add v[0:1], v2, off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw add ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_add_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_add_i32_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i32_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i32_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add v[0:1], v2, off offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw add ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @global_atomic_add_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_add_i32_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i32_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i32_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add v0, v[0:1], v2, off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw add ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @global_atomic_add_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_add_i32_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i32_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i32_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add v0, v[0:1], v2, off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw add ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @global_atomic_add_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_add_i32_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add v1, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i32_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i32_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add v0, v1, s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw add ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_add_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_add_i32_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add v1, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i32_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i32_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add v0, v1, s[4:5] offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw add ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @global_atomic_add_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_add_i32_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add v0, off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i32_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i32_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add v0, v0, v1, s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw add ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @global_atomic_add_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_add_i32_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add v0, off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i32_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i32_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add v0, v0, v1, s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw add ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw sub
+; ---------------------------------------------------------------------
+
+define void @global_atomic_sub_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_sub_i32_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i32_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub v[0:1], v2, off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw sub ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_sub_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_sub_i32_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub v2, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i32_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i32_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub v[0:1], v2, off offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw sub ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @global_atomic_sub_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_sub_i32_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub v2, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i32_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i32_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub v0, v[0:1], v2, off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw sub ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @global_atomic_sub_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_sub_i32_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i32_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i32_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub v0, v[0:1], v2, off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw sub ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @global_atomic_sub_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_sub_i32_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub v1, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i32_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i32_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub v0, v1, s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw sub ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_sub_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_sub_i32_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub v1, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i32_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i32_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub v0, v1, s[4:5] offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw sub ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @global_atomic_sub_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_sub_i32_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub v0, off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i32_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i32_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub v0, v0, v1, s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw sub ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @global_atomic_sub_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_sub_i32_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub v0, off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i32_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i32_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub v0, v0, v1, s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw sub ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw and
+; ---------------------------------------------------------------------
+
+define void @global_atomic_and_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_and_i32_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i32_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and v[0:1], v2, off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw and ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_and_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_and_i32_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and v2, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i32_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i32_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and v[0:1], v2, off offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw and ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @global_atomic_and_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_and_i32_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and v2, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i32_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i32_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and v0, v[0:1], v2, off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw and ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @global_atomic_and_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_and_i32_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i32_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i32_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and v0, v[0:1], v2, off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw and ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @global_atomic_and_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_and_i32_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and v1, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i32_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i32_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and v0, v1, s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw and ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_and_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_and_i32_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and v1, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i32_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i32_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and v0, v1, s[4:5] offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw and ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @global_atomic_and_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_and_i32_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and v0, off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i32_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i32_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and v0, v0, v1, s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw and ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @global_atomic_and_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_and_i32_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and v0, off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i32_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i32_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and v0, v0, v1, s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw and ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw nand
+; ---------------------------------------------------------------------
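+; None of these targets have a native atomic nand, so AtomicExpand lowers
+; the atomicrmw into a compare-exchange loop. Roughly, as a sketch of the
+; expanded IR (the phi feeding %loaded around the loop is elided):
+;
+;   %loaded = load i32, ptr addrspace(1) %ptr
+;   br label %atomicrmw.start
+; atomicrmw.start:
+;   %tmp = and i32 %loaded, %in
+;   %new = xor i32 %tmp, -1
+;   %pair = cmpxchg ptr addrspace(1) %ptr, i32 %loaded, i32 %new seq_cst seq_cst
+;   %newloaded = extractvalue { i32, i1 } %pair, 0
+;   %success = extractvalue { i32, i1 } %pair, 1
+;   br i1 %success, label %atomicrmw.end, label %atomicrmw.start
+; atomicrmw.end:
+;
+; The .LBB*_1 loops in the checks below are that expansion after isel.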
+
+define void @global_atomic_nand_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_nand_i32_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB32_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v3, v4, v2
+; SI-NEXT:    v_not_b32_e32 v3, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v6, v4
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v4
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB32_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i32_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB32_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v3, v4, v2
+; VI-NEXT:    v_not_b32_e32 v3, v3
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB32_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v4, v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB32_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v3, v4, v2
+; GFX9-NEXT:    v_not_b32_e32 v3, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB32_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw nand ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
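+
+; The loop retries per lane until the cmpswap result matches the value the
+; iteration started with: v_cmp_eq collects the lanes that succeeded,
+; s_or_b64 accumulates them into the done mask, s_andn2_b64 clears them
+; from exec, and s_cbranch_execnz loops while any lane is still active.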
+
+define void @global_atomic_nand_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_nand_i32_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB33_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v3, v4, v2
+; SI-NEXT:    v_not_b32_e32 v3, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v6, v4
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v4
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB33_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i32_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB33_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v3, v4, v2
+; VI-NEXT:    v_not_b32_e32 v3, v3
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB33_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i32_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v4, v[0:1], off offset:16
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB33_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v3, v4, v2
+; GFX9-NEXT:    v_not_b32_e32 v3, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB33_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw nand ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
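+; The _offset variants above show how each target reaches ptr+16: SI folds
+; it into the buffer addressing mode (addr64 offset:16), VI has no immediate
+; offset on flat instructions and must materialize the address with a 64-bit
+; v_add_u32/v_addc_u32 pair, and GFX9 uses the immediate offset field of the
+; global instructions (offset:16).
+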
+define i32 @global_atomic_nand_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_nand_i32_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB34_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v3, v5, v2
+; SI-NEXT:    v_not_b32_e32 v4, v3
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v5
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB34_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i32_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB34_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    v_and_b32_e32 v3, v4, v2
+; VI-NEXT:    v_not_b32_e32 v3, v3
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB34_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, v3
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i32_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v3, v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB34_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    v_and_b32_e32 v3, v4, v2
+; GFX9-NEXT:    v_not_b32_e32 v3, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB34_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw nand ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @global_atomic_nand_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_nand_i32_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB35_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v3, v5, v2
+; SI-NEXT:    v_not_b32_e32 v4, v3
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v5
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB35_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i32_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v3, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[3:4]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB35_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_and_b32_e32 v0, v1, v2
+; VI-NEXT:    v_not_b32_e32 v0, v0
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB35_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i32_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v3, v[0:1], off offset:16
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB35_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    v_and_b32_e32 v3, v4, v2
+; GFX9-NEXT:    v_not_b32_e32 v3, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB35_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw nand ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @global_atomic_nand_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_nand_i32_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB36_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, s34, v2
+; SI-NEXT:    v_not_b32_e32 v1, v1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB36_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i32_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dword v1, v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB36_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v0, s6, v1
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_not_b32_e32 v0, v0
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB36_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i32_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dword v1, v2, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB36_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v0, s6, v1
+; GFX9-NEXT:    v_not_b32_e32 v0, v0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB36_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw nand ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
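+; The _scalar variants take their operands in SGPRs via amdgpu_gfx inreg
+; arguments. GFX9 addresses memory directly off the SGPR pair (saddr form,
+; e.g. global_load_dword v1, v2, s[4:5]); VI flat instructions only accept
+; VGPR addresses, so the pointer is first copied into VGPRs. On SI the
+; buffer path needs an aligned four-SGPR resource descriptor, so s[4:7] is
+; reused as the descriptor: the incoming s6/s7 are parked in VGPR lanes with
+; v_writelane_b32 and restored with v_readlane_b32, and the spill VGPR
+; itself is saved and reloaded around an exec flip so even inactive lanes
+; survive.
+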
+define amdgpu_gfx void @global_atomic_nand_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_nand_i32_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB37_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, s34, v2
+; SI-NEXT:    v_not_b32_e32 v1, v1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB37_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i32_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dword v1, v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB37_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v0, s6, v1
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    v_not_b32_e32 v0, v0
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB37_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i32_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dword v1, v2, s[4:5] offset:16
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB37_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v0, s6, v1
+; GFX9-NEXT:    v_not_b32_e32 v0, v0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB37_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw nand ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @global_atomic_nand_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_nand_i32_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB38_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_and_b32_e32 v0, s34, v4
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_not_b32_e32 v3, v0
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v4
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB38_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i32_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB38_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_and_b32_e32 v0, s6, v1
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_not_b32_e32 v0, v0
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB38_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i32_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_load_dword v0, v1, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB38_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_and_b32_e32 v0, s6, v3
+; GFX9-NEXT:    v_not_b32_e32 v2, v0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB38_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw nand ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @global_atomic_nand_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_nand_i32_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB39_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_and_b32_e32 v0, s34, v4
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_not_b32_e32 v3, v0
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v4
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB39_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i32_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB39_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_and_b32_e32 v0, s6, v1
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    v_not_b32_e32 v0, v0
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB39_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i32_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_load_dword v0, v1, s[4:5] offset:16
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB39_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_and_b32_e32 v0, s6, v3
+; GFX9-NEXT:    v_not_b32_e32 v2, v0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB39_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw nand ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw or
+; ---------------------------------------------------------------------
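+; Unlike nand, or maps to a native atomic on every target in this file
+; (buffer_atomic_or on SI, flat_atomic_or on VI, global_atomic_or on GFX9),
+; so no compare-and-swap loop is needed; the seq_cst ordering still costs
+; the s_waitcnt fences and a buffer_wbinvl1(_vol) cache invalidate.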
+
+define void @global_atomic_or_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_or_i32_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i32_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or v[0:1], v2, off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw or ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_or_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_or_i32_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or v2, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i32_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i32_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or v[0:1], v2, off offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw or ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @global_atomic_or_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_or_i32_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or v2, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i32_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i32_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or v0, v[0:1], v2, off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw or ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
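+; The ret forms differ from the noret forms only in the glc bit: with glc
+; set the atomic writes the pre-op memory value back into its data VGPR,
+; which the functions above then return in v0.
+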
+define i32 @global_atomic_or_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_or_i32_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i32_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i32_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or v0, v[0:1], v2, off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw or ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @global_atomic_or_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_or_i32_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or v1, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i32_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i32_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or v0, v1, s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw or ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_or_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_or_i32_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or v1, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i32_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i32_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or v0, v1, s[4:5] offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw or ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @global_atomic_or_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_or_i32_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or v0, off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i32_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i32_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or v0, v0, v1, s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw or ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @global_atomic_or_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_or_i32_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or v0, off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i32_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i32_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or v0, v0, v1, s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw or ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw xor
+; ---------------------------------------------------------------------
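+; xor follows the same single-instruction pattern as or above, using
+; buffer_atomic_xor / flat_atomic_xor / global_atomic_xor.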
+
+define void @global_atomic_xor_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_xor_i32_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i32_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor v[0:1], v2, off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xor ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_xor_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_xor_i32_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor v2, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i32_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i32_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor v[0:1], v2, off offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw xor ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @global_atomic_xor_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_xor_i32_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor v2, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i32_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i32_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor v0, v[0:1], v2, off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @global_atomic_xor_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_xor_i32_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i32_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i32_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor v0, v[0:1], v2, off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw xor ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @global_atomic_xor_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_xor_i32_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor v1, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i32_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i32_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor v0, v1, s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xor ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_xor_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_xor_i32_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor v1, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i32_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i32_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor v0, v1, s[4:5] offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw xor ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @global_atomic_xor_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_xor_i32_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor v0, off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i32_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i32_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor v0, v0, v1, s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @global_atomic_xor_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_xor_i32_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor v0, off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i32_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i32_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor v0, v0, v1, s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw xor ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw max
+; ---------------------------------------------------------------------
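+; Although the hardware has signed-max atomics, the max tests below compile
+; to compare-and-swap loops like nand did; presumably the backend declines
+; to use the native instruction for these system-scope (default syncscope)
+; seq_cst operations and lets AtomicExpand lower them instead.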
+
+define void @global_atomic_max_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_max_i32_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB56_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_max_i32_e32 v3, v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v6, v4
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v4
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB56_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i32_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB56_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_max_i32_e32 v3, v4, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB56_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v4, v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB56_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_max_i32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB56_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw max ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_max_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_max_i32_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB57_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_max_i32_e32 v3, v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v6, v4
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v4
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB57_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i32_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB57_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_max_i32_e32 v3, v4, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB57_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i32_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v4, v[0:1], off offset:16
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB57_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_max_i32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB57_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @global_atomic_max_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_max_i32_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB58_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_max_i32_e32 v4, v5, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v5
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB58_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i32_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB58_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    v_max_i32_e32 v3, v4, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB58_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, v3
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i32_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v3, v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB58_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    v_max_i32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB58_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw max ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @global_atomic_max_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_max_i32_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB59_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_max_i32_e32 v4, v5, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v5
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB59_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i32_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v3, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[3:4]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB59_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_max_i32_e32 v0, v1, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB59_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i32_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v3, v[0:1], off offset:16
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB59_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    v_max_i32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB59_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @global_atomic_max_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_max_i32_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB60_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_max_i32_e32 v1, s34, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB60_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i32_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dword v1, v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB60_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_max_i32_e32 v0, s6, v1
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB60_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i32_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dword v1, v2, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB60_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_max_i32_e32 v0, s6, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB60_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw max ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_max_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_max_i32_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB61_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_max_i32_e32 v1, s34, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB61_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i32_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dword v1, v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB61_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_max_i32_e32 v0, s6, v1
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB61_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i32_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dword v1, v2, s[4:5] offset:16
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB61_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_max_i32_e32 v0, s6, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB61_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @global_atomic_max_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_max_i32_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB62_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_max_i32_e32 v3, s34, v4
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v4
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB62_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i32_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB62_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_max_i32_e32 v0, s6, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB62_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i32_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_load_dword v0, v1, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB62_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_max_i32_e32 v2, s6, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB62_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw max ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @global_atomic_max_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_max_i32_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB63_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_max_i32_e32 v3, s34, v4
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v4
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB63_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i32_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB63_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    v_max_i32_e32 v0, s6, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB63_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i32_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_load_dword v0, v1, s[4:5] offset:16
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB63_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_max_i32_e32 v2, s6, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB63_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_kernel void @atomic_max_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i32 %index) {
+; SI-LABEL: atomic_max_i32_addr64_offset:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_ashr_i32 s5, s3, 31
+; SI-NEXT:    s_mov_b32 s4, s3
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dword s3, s[4:5], 0x4
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB64_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_max_i32_e32 v0, s2, v1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    v_mov_b32_e32 v2, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, v2
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB64_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_max_i32_addr64_offset:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_ashr_i32 s5, s3, 31
+; VI-NEXT:    s_mov_b32 s4, s3
+; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; VI-NEXT:    s_add_u32 s0, s0, s4
+; VI-NEXT:    s_addc_u32 s1, s1, s5
+; VI-NEXT:    s_load_dword s3, s[0:1], 0x10
+; VI-NEXT:    s_add_u32 s0, s0, 16
+; VI-NEXT:    s_addc_u32 s1, s1, 0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:  .LBB64_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_max_i32_e32 v0, s2, v1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB64_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_addr64_offset:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_ashr_i32 s5, s3, 31
+; GFX9-NEXT:    s_mov_b32 s4, s3
+; GFX9-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GFX9-NEXT:    s_add_u32 s0, s0, s4
+; GFX9-NEXT:    s_addc_u32 s1, s1, s5
+; GFX9-NEXT:    s_load_dword s3, s[0:1], 0x10
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:  .LBB64_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_max_i32_e32 v0, s2, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB64_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
+  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
+  %tmp0 = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %index) {
+; SI-LABEL: atomic_max_i32_ret_addr64_offset:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_ashr_i32 s5, s9, 31
+; SI-NEXT:    s_mov_b32 s4, s9
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dword s6, s[4:5], 0x4
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB65_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_max_i32_e32 v0, s8, v1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    v_mov_b32_e32 v2, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, v2
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB65_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s4, s2
+; SI-NEXT:    s_mov_b32 s5, s3
+; SI-NEXT:    buffer_store_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_max_i32_ret_addr64_offset:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_ashr_i32 s7, s5, 31
+; VI-NEXT:    s_mov_b32 s6, s5
+; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; VI-NEXT:    s_add_u32 s0, s0, s6
+; VI-NEXT:    s_addc_u32 s1, s1, s7
+; VI-NEXT:    s_load_dword s5, s[0:1], 0x10
+; VI-NEXT:    s_add_u32 s6, s0, 16
+; VI-NEXT:    s_addc_u32 s7, s1, 0
+; VI-NEXT:    s_mov_b64 s[0:1], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:  .LBB65_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_max_i32_e32 v0, s4, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT:    s_cbranch_execnz .LBB65_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    flat_store_dword v[1:2], v0
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_ret_addr64_offset:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_ashr_i32 s1, s3, 31
+; GFX9-NEXT:    s_mov_b32 s0, s3
+; GFX9-NEXT:    s_lshl_b64 s[0:1], s[0:1], 2
+; GFX9-NEXT:    s_add_u32 s0, s4, s0
+; GFX9-NEXT:    s_addc_u32 s1, s5, s1
+; GFX9-NEXT:    s_load_dword s3, s[0:1], 0x10
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-NEXT:  .LBB65_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_max_i32_e32 v2, s2, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB65_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
+  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
+  %tmp0 = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst
+  store i32 %tmp0, ptr addrspace(1) %out2
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_addr64(ptr addrspace(1) %out, i32 %in, i32 %index) {
+; SI-LABEL: atomic_max_i32_addr64:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_ashr_i32 s5, s3, 31
+; SI-NEXT:    s_mov_b32 s4, s3
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dword s3, s[4:5], 0x0
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB66_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_max_i32_e32 v0, s2, v1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    v_mov_b32_e32 v2, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, v2
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB66_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_max_i32_addr64:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_ashr_i32 s5, s3, 31
+; VI-NEXT:    s_mov_b32 s4, s3
+; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; VI-NEXT:    s_add_u32 s0, s0, s4
+; VI-NEXT:    s_addc_u32 s1, s1, s5
+; VI-NEXT:    s_load_dword s3, s[0:1], 0x0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:  .LBB66_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_max_i32_e32 v0, s2, v1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB66_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_addr64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_ashr_i32 s5, s3, 31
+; GFX9-NEXT:    s_mov_b32 s4, s3
+; GFX9-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GFX9-NEXT:    s_add_u32 s0, s0, s4
+; GFX9-NEXT:    s_addc_u32 s1, s1, s5
+; GFX9-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:  .LBB66_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_max_i32_e32 v0, s2, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB66_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
+  %tmp0 = atomicrmw max ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %index) {
+; SI-LABEL: atomic_max_i32_ret_addr64:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_ashr_i32 s5, s9, 31
+; SI-NEXT:    s_mov_b32 s4, s9
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dword s6, s[4:5], 0x0
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB67_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_max_i32_e32 v0, s8, v1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    v_mov_b32_e32 v2, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, v2
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB67_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s4, s2
+; SI-NEXT:    s_mov_b32 s5, s3
+; SI-NEXT:    buffer_store_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_max_i32_ret_addr64:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_ashr_i32 s7, s5, 31
+; VI-NEXT:    s_mov_b32 s6, s5
+; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; VI-NEXT:    s_add_u32 s0, s0, s6
+; VI-NEXT:    s_addc_u32 s1, s1, s7
+; VI-NEXT:    s_load_dword s5, s[0:1], 0x0
+; VI-NEXT:    s_mov_b64 s[6:7], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:  .LBB67_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_max_i32_e32 v0, s4, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; VI-NEXT:    s_cbranch_execnz .LBB67_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[6:7]
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    flat_store_dword v[1:2], v0
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_ret_addr64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_ashr_i32 s1, s3, 31
+; GFX9-NEXT:    s_mov_b32 s0, s3
+; GFX9-NEXT:    s_lshl_b64 s[0:1], s[0:1], 2
+; GFX9-NEXT:    s_add_u32 s0, s4, s0
+; GFX9-NEXT:    s_addc_u32 s1, s5, s1
+; GFX9-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-NEXT:  .LBB67_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_max_i32_e32 v2, s2, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB67_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
+  %tmp0 = atomicrmw max ptr addrspace(1) %ptr, i32 %in seq_cst
+  store i32 %tmp0, ptr addrspace(1) %out2
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw umax
+; ---------------------------------------------------------------------
+
+define void @global_atomic_umax_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_umax_i32_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB68_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_max_u32_e32 v3, v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v6, v4
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v4
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB68_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i32_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB68_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_max_u32_e32 v3, v4, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB68_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v4, v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB68_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_max_u32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB68_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umax ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_umax_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_umax_i32_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB69_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_max_u32_e32 v3, v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v6, v4
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v4
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB69_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i32_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB69_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_max_u32_e32 v3, v4, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB69_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i32_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v4, v[0:1], off offset:16
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB69_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_max_u32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB69_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @global_atomic_umax_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_umax_i32_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB70_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_max_u32_e32 v4, v5, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v5
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB70_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i32_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB70_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    v_max_u32_e32 v3, v4, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB70_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, v3
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i32_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v3, v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB70_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    v_max_u32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB70_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umax ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @global_atomic_umax_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_umax_i32_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB71_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_max_u32_e32 v4, v5, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v5
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB71_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i32_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v3, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[3:4]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB71_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_max_u32_e32 v0, v1, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB71_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i32_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v3, v[0:1], off offset:16
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB71_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    v_max_u32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB71_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @global_atomic_umax_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_umax_i32_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB72_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_max_u32_e32 v1, s34, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB72_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i32_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dword v1, v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB72_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_max_u32_e32 v0, s6, v1
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB72_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i32_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dword v1, v2, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB72_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_max_u32_e32 v0, s6, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB72_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umax ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_umax_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_umax_i32_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB73_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_max_u32_e32 v1, s34, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB73_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i32_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dword v1, v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB73_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_max_u32_e32 v0, s6, v1
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB73_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i32_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dword v1, v2, s[4:5] offset:16
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB73_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_max_u32_e32 v0, s6, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB73_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @global_atomic_umax_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_umax_i32_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB74_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_max_u32_e32 v3, s34, v4
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v4
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB74_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i32_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB74_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_max_u32_e32 v0, s6, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB74_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i32_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_load_dword v0, v1, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB74_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_max_u32_e32 v2, s6, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB74_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umax ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @global_atomic_umax_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_umax_i32_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB75_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_max_u32_e32 v3, s34, v4
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v4
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB75_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i32_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB75_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    v_max_u32_e32 v0, s6, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB75_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i32_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_load_dword v0, v1, s[4:5] offset:16
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB75_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_max_u32_e32 v2, s6, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB75_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_kernel void @atomic_umax_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i32 %index) {
+; SI-LABEL: atomic_umax_i32_addr64_offset:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_ashr_i32 s5, s3, 31
+; SI-NEXT:    s_mov_b32 s4, s3
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dword s3, s[4:5], 0x4
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB76_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_max_u32_e32 v0, s2, v1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    v_mov_b32_e32 v2, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, v2
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB76_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_umax_i32_addr64_offset:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_ashr_i32 s5, s3, 31
+; VI-NEXT:    s_mov_b32 s4, s3
+; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; VI-NEXT:    s_add_u32 s0, s0, s4
+; VI-NEXT:    s_addc_u32 s1, s1, s5
+; VI-NEXT:    s_load_dword s3, s[0:1], 0x10
+; VI-NEXT:    s_add_u32 s0, s0, 16
+; VI-NEXT:    s_addc_u32 s1, s1, 0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:  .LBB76_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_max_u32_e32 v0, s2, v1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB76_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i32_addr64_offset:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_ashr_i32 s5, s3, 31
+; GFX9-NEXT:    s_mov_b32 s4, s3
+; GFX9-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GFX9-NEXT:    s_add_u32 s0, s0, s4
+; GFX9-NEXT:    s_addc_u32 s1, s1, s5
+; GFX9-NEXT:    s_load_dword s3, s[0:1], 0x10
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:  .LBB76_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_max_u32_e32 v0, s2, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB76_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
+  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
+  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %index) {
+; SI-LABEL: atomic_umax_i32_ret_addr64_offset:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_ashr_i32 s5, s9, 31
+; SI-NEXT:    s_mov_b32 s4, s9
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dword s6, s[4:5], 0x4
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB77_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_max_u32_e32 v0, s8, v1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    v_mov_b32_e32 v2, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, v2
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB77_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s4, s2
+; SI-NEXT:    s_mov_b32 s5, s3
+; SI-NEXT:    buffer_store_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_umax_i32_ret_addr64_offset:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_ashr_i32 s7, s5, 31
+; VI-NEXT:    s_mov_b32 s6, s5
+; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; VI-NEXT:    s_add_u32 s0, s0, s6
+; VI-NEXT:    s_addc_u32 s1, s1, s7
+; VI-NEXT:    s_load_dword s5, s[0:1], 0x10
+; VI-NEXT:    s_add_u32 s6, s0, 16
+; VI-NEXT:    s_addc_u32 s7, s1, 0
+; VI-NEXT:    s_mov_b64 s[0:1], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:  .LBB77_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_max_u32_e32 v0, s4, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT:    s_cbranch_execnz .LBB77_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    flat_store_dword v[1:2], v0
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i32_ret_addr64_offset:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_ashr_i32 s1, s3, 31
+; GFX9-NEXT:    s_mov_b32 s0, s3
+; GFX9-NEXT:    s_lshl_b64 s[0:1], s[0:1], 2
+; GFX9-NEXT:    s_add_u32 s0, s4, s0
+; GFX9-NEXT:    s_addc_u32 s1, s5, s1
+; GFX9-NEXT:    s_load_dword s3, s[0:1], 0x10
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-NEXT:  .LBB77_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_max_u32_e32 v2, s2, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB77_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
+  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
+  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst
+  store i32 %tmp0, ptr addrspace(1) %out2
+  ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %index) {
+; SI-LABEL: atomic_umax_i32_ret_addr64:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_ashr_i32 s5, s9, 31
+; SI-NEXT:    s_mov_b32 s4, s9
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dword s6, s[4:5], 0x0
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB78_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_max_u32_e32 v0, s8, v1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    v_mov_b32_e32 v2, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, v2
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB78_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s4, s2
+; SI-NEXT:    s_mov_b32 s5, s3
+; SI-NEXT:    buffer_store_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_umax_i32_ret_addr64:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_ashr_i32 s7, s5, 31
+; VI-NEXT:    s_mov_b32 s6, s5
+; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; VI-NEXT:    s_add_u32 s0, s0, s6
+; VI-NEXT:    s_addc_u32 s1, s1, s7
+; VI-NEXT:    s_load_dword s5, s[0:1], 0x0
+; VI-NEXT:    s_mov_b64 s[6:7], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:  .LBB78_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_max_u32_e32 v0, s4, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; VI-NEXT:    s_cbranch_execnz .LBB78_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[6:7]
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    flat_store_dword v[1:2], v0
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i32_ret_addr64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_ashr_i32 s1, s3, 31
+; GFX9-NEXT:    s_mov_b32 s0, s3
+; GFX9-NEXT:    s_lshl_b64 s[0:1], s[0:1], 2
+; GFX9-NEXT:    s_add_u32 s0, s4, s0
+; GFX9-NEXT:    s_addc_u32 s1, s5, s1
+; GFX9-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-NEXT:  .LBB78_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_max_u32_e32 v2, s2, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB78_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
+  %tmp0 = atomicrmw umax ptr addrspace(1) %ptr, i32 %in seq_cst
+  store i32 %tmp0, ptr addrspace(1) %out2
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw umin
+; ---------------------------------------------------------------------
+
+define void @global_atomic_umin_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_umin_i32_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB79_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_min_u32_e32 v3, v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v6, v4
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v4
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB79_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i32_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB79_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_min_u32_e32 v3, v4, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB79_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v4, v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB79_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_min_u32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB79_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umin ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_umin_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_umin_i32_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB80_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_min_u32_e32 v3, v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v6, v4
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v4
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB80_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i32_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB80_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_min_u32_e32 v3, v4, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB80_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i32_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v4, v[0:1], off offset:16
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB80_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_min_u32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB80_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw umin ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @global_atomic_umin_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_umin_i32_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB81_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_min_u32_e32 v4, v5, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v5
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB81_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i32_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB81_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    v_min_u32_e32 v3, v4, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB81_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, v3
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i32_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v3, v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB81_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    v_min_u32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB81_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umin ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @global_atomic_umin_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_umin_i32_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB82_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_min_u32_e32 v4, v5, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v5
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB82_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i32_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v3, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[3:4]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB82_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_min_u32_e32 v0, v1, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB82_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i32_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v3, v[0:1], off offset:16
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB82_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    v_min_u32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB82_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw umin ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @global_atomic_umin_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_umin_i32_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB83_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_min_u32_e32 v1, s34, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB83_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i32_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dword v1, v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB83_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_min_u32_e32 v0, s6, v1
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB83_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i32_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dword v1, v2, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB83_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_min_u32_e32 v0, s6, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB83_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umin ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_umin_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_umin_i32_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB84_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_min_u32_e32 v1, s34, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB84_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i32_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dword v1, v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB84_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_min_u32_e32 v0, s6, v1
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB84_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i32_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dword v1, v2, s[4:5] offset:16
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB84_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_min_u32_e32 v0, s6, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB84_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw umin ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @global_atomic_umin_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_umin_i32_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB85_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_min_u32_e32 v3, s34, v4
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v4
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB85_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i32_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB85_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_min_u32_e32 v0, s6, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB85_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i32_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_load_dword v0, v1, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB85_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_min_u32_e32 v2, s6, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB85_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umin ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @global_atomic_umin_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_umin_i32_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB86_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_min_u32_e32 v3, s34, v4
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v4
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB86_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i32_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB86_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    v_min_u32_e32 v0, s6, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB86_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i32_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_load_dword v0, v1, s[4:5] offset:16
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB86_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_min_u32_e32 v2, s6, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB86_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw umin ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw min
+; ---------------------------------------------------------------------
+
+define void @global_atomic_min_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_min_i32_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB87_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_min_i32_e32 v3, v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v6, v4
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v4
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB87_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i32_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB87_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_min_i32_e32 v3, v4, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB87_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v4, v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB87_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_min_i32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB87_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw min ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_min_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_min_i32_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB88_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_min_i32_e32 v3, v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v6, v4
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v4
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB88_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i32_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB88_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_min_i32_e32 v3, v4, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB88_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i32_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v4, v[0:1], off offset:16
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB88_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_min_i32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB88_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @global_atomic_min_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_min_i32_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB89_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_min_i32_e32 v4, v5, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v5
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB89_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i32_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB89_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v4, v3
+; VI-NEXT:    v_min_i32_e32 v3, v4, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB89_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, v3
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i32_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v3, v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB89_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    v_min_i32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB89_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw min ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @global_atomic_min_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_min_i32_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB90_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_min_i32_e32 v4, v5, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    v_mov_b32_e32 v4, v5
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v5
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB90_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i32_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v3, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[3:4]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB90_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_min_i32_e32 v0, v1, v2
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB90_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i32_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v3, v[0:1], off offset:16
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB90_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    v_min_i32_e32 v3, v4, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB90_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @global_atomic_min_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_min_i32_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB91_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_min_i32_e32 v1, s34, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB91_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i32_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dword v1, v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB91_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_min_i32_e32 v0, s6, v1
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB91_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i32_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dword v1, v2, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB91_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_min_i32_e32 v0, s6, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB91_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw min ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_min_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_min_i32_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB92_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_min_i32_e32 v1, s34, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[3:4], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB92_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i32_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dword v1, v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB92_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_min_i32_e32 v0, s6, v1
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB92_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i32_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dword v1, v2, s[4:5] offset:16
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB92_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_min_i32_e32 v0, s6, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB92_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @global_atomic_min_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_min_i32_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB93_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_min_i32_e32 v3, s34, v4
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v4
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB93_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i32_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB93_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_min_i32_e32 v0, s6, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB93_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i32_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_load_dword v0, v1, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB93_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_min_i32_e32 v2, s6, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB93_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw min ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @global_atomic_min_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_min_i32_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB94_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_min_i32_e32 v3, s34, v4
+; SI-NEXT:    v_mov_b32_e32 v2, v3
+; SI-NEXT:    v_mov_b32_e32 v3, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v4
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB94_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i32_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB94_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    v_min_i32_e32 v0, s6, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB94_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i32_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_load_dword v0, v1, s[4:5] offset:16
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB94_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_min_i32_e32 v2, s6, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB94_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
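+; In the amdgpu_gfx scalar variants above, the SI lowering reuses s6/s7 to
+; build the buffer resource descriptor (s7 = 0xf000, s6 = -1), so it first
+; parks their incoming values in lanes of a scratch-spilled VGPR with
+; v_writelane_b32 and restores them via v_readlane_b32 before returning.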
+
+define amdgpu_kernel void @atomic_min_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i32 %index) {
+; SI-LABEL: atomic_min_i32_addr64_offset:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_ashr_i32 s5, s3, 31
+; SI-NEXT:    s_mov_b32 s4, s3
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dword s3, s[4:5], 0x4
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB95_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_min_i32_e32 v0, s2, v1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    v_mov_b32_e32 v2, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, v2
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB95_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_min_i32_addr64_offset:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_ashr_i32 s5, s3, 31
+; VI-NEXT:    s_mov_b32 s4, s3
+; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; VI-NEXT:    s_add_u32 s0, s0, s4
+; VI-NEXT:    s_addc_u32 s1, s1, s5
+; VI-NEXT:    s_load_dword s3, s[0:1], 0x10
+; VI-NEXT:    s_add_u32 s0, s0, 16
+; VI-NEXT:    s_addc_u32 s1, s1, 0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:  .LBB95_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_min_i32_e32 v0, s2, v1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB95_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_min_i32_addr64_offset:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_ashr_i32 s5, s3, 31
+; GFX9-NEXT:    s_mov_b32 s4, s3
+; GFX9-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GFX9-NEXT:    s_add_u32 s0, s0, s4
+; GFX9-NEXT:    s_addc_u32 s1, s1, s5
+; GFX9-NEXT:    s_load_dword s3, s[0:1], 0x10
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:  .LBB95_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_min_i32_e32 v0, s2, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB95_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
+  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
+  %tmp0 = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
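+; The addr64 kernels above form the pointer by sign-extending the i32
+; %index (s_ashr_i32 of bit 31), scaling by the i32 element size with
+; s_lshl_b64 ..., 2, and adding the result to %out with
+; s_add_u32/s_addc_u32.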
+
+define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %index) {
+; SI-LABEL: atomic_min_i32_ret_addr64_offset:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_ashr_i32 s5, s9, 31
+; SI-NEXT:    s_mov_b32 s4, s9
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dword s6, s[4:5], 0x4
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB96_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_min_i32_e32 v0, s8, v1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    v_mov_b32_e32 v2, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, v2
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB96_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s4, s2
+; SI-NEXT:    s_mov_b32 s5, s3
+; SI-NEXT:    buffer_store_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_min_i32_ret_addr64_offset:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_ashr_i32 s7, s5, 31
+; VI-NEXT:    s_mov_b32 s6, s5
+; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; VI-NEXT:    s_add_u32 s0, s0, s6
+; VI-NEXT:    s_addc_u32 s1, s1, s7
+; VI-NEXT:    s_load_dword s5, s[0:1], 0x10
+; VI-NEXT:    s_add_u32 s6, s0, 16
+; VI-NEXT:    s_addc_u32 s7, s1, 0
+; VI-NEXT:    s_mov_b64 s[0:1], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:  .LBB96_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_min_i32_e32 v0, s4, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT:    s_cbranch_execnz .LBB96_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    flat_store_dword v[1:2], v0
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_min_i32_ret_addr64_offset:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_ashr_i32 s1, s3, 31
+; GFX9-NEXT:    s_mov_b32 s0, s3
+; GFX9-NEXT:    s_lshl_b64 s[0:1], s[0:1], 2
+; GFX9-NEXT:    s_add_u32 s0, s4, s0
+; GFX9-NEXT:    s_addc_u32 s1, s5, s1
+; GFX9-NEXT:    s_load_dword s3, s[0:1], 0x10
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-NEXT:  .LBB96_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_min_i32_e32 v2, s2, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB96_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
+  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
+  %tmp0 = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst
+  store i32 %tmp0, ptr addrspace(1) %out2
+  ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: atomic_min_i32:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s3, s[4:5], 0x0
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB97_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_min_i32_e32 v0, s2, v1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    v_mov_b32_e32 v2, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, v2
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB97_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_min_i32:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; VI-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; VI-NEXT:    s_mov_b64 s[0:1], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s5, s[2:3], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:  .LBB97_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_min_i32_e32 v0, s4, v1
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT:    s_cbranch_execnz .LBB97_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_min_i32:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT:    s_mov_b64 s[0:1], 0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_load_dword s5, s[2:3], 0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:  .LBB97_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_min_i32_e32 v0, s4, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT:    s_cbranch_execnz .LBB97_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_endpgm
+entry:
+  %tmp0 = atomicrmw min ptr addrspace(1) %out, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %index) {
+; SI-LABEL: atomic_min_i32_ret_addr64:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_ashr_i32 s5, s9, 31
+; SI-NEXT:    s_mov_b32 s4, s9
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dword s6, s[4:5], 0x0
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB98_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_min_i32_e32 v0, s8, v1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, v1
+; SI-NEXT:    v_mov_b32_e32 v2, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, v2
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB98_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s4, s2
+; SI-NEXT:    s_mov_b32 s5, s3
+; SI-NEXT:    buffer_store_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_min_i32_ret_addr64:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_ashr_i32 s7, s5, 31
+; VI-NEXT:    s_mov_b32 s6, s5
+; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 2
+; VI-NEXT:    s_add_u32 s0, s0, s6
+; VI-NEXT:    s_addc_u32 s1, s1, s7
+; VI-NEXT:    s_load_dword s5, s[0:1], 0x0
+; VI-NEXT:    s_mov_b64 s[6:7], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:  .LBB98_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_min_i32_e32 v0, s4, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; VI-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; VI-NEXT:    s_cbranch_execnz .LBB98_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[6:7]
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    flat_store_dword v[1:2], v0
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_min_i32_ret_addr64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_ashr_i32 s1, s3, 31
+; GFX9-NEXT:    s_mov_b32 s0, s3
+; GFX9-NEXT:    s_lshl_b64 s[0:1], s[0:1], 2
+; GFX9-NEXT:    s_add_u32 s0, s4, s0
+; GFX9-NEXT:    s_addc_u32 s1, s5, s1
+; GFX9-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-NEXT:  .LBB98_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_min_i32_e32 v2, s2, v3
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB98_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
+  %tmp0 = atomicrmw min ptr addrspace(1) %ptr, i32 %in seq_cst
+  store i32 %tmp0, ptr addrspace(1) %out2
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw uinc_wrap
+; ---------------------------------------------------------------------
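+; In contrast to the min/max sections above, uinc_wrap needs no
+; compare-exchange expansion: all three subtargets select the native inc
+; atomics (buffer_atomic_inc, flat_atomic_inc, global_atomic_inc) directly,
+; even for the seq_cst system-scope operations tested here.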
+
+define void @global_atomic_uinc_wrap_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i32_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i32_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc v[0:1], v2, off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_uinc_wrap_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i32_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc v2, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i32_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i32_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc v[0:1], v2, off offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
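+; Offset handling differs per subtarget in the _offset tests: SI and GFX9
+; fold the 16-byte displacement into the addressing mode (addr64 offset:16
+; and offset:16 respectively), while VI flat instructions have no offset
+; operand, so the address is first materialized with
+; v_add_u32_e32/v_addc_u32_e32.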
+
+define i32 @global_atomic_uinc_wrap_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i32_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc v2, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i32_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i32_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc v0, v[0:1], v2, off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @global_atomic_uinc_wrap_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i32_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i32_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i32_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc v0, v[0:1], v2, off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @global_atomic_uinc_wrap_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i32_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc v1, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i32_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i32_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc v0, v1, s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_uinc_wrap_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i32_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc v1, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i32_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i32_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc v0, v1, s[4:5] offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @global_atomic_uinc_wrap_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i32_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc v0, off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i32_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i32_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc v0, v0, v1, s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @global_atomic_uinc_wrap_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i32_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc v0, off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i32_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i32_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc v0, v0, v1, s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw udec_wrap
+; ---------------------------------------------------------------------
+
+define void @global_atomic_udec_wrap_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_udec_wrap_i32_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i32_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec v[0:1], v2, off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_udec_wrap_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_udec_wrap_i32_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec v2, v[0:1], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i32_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i32_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec v[0:1], v2, off offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define i32 @global_atomic_udec_wrap_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
+; SI-LABEL: global_atomic_udec_wrap_i32_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec v2, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i32_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i32_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec v0, v[0:1], v2, off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define i32 @global_atomic_udec_wrap_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: global_atomic_udec_wrap_i32_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i32_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i32_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec v0, v[0:1], v2, off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_udec_wrap_i32_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec v1, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i32_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i32_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec v0, v1, s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_udec_wrap_i32_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec v1, off, s[4:7], 0 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i32_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i32_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec v0, v1, s[4:5] offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %tmp0 = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
+; SI-LABEL: global_atomic_udec_wrap_i32_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec v0, off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i32_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i32_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec v0, v0, v1, s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
+  ret i32 %result
+}
+
+define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
+; SI-LABEL: global_atomic_udec_wrap_i32_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v1, s6, 0
+; SI-NEXT:    v_writelane_b32 v1, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec v0, off, s[4:7], 0 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v1, 1
+; SI-NEXT:    v_readlane_b32 s6, v1, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i32_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 16
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec v0, v[0:1], v2 glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i32_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec v0, v0, v1, s[4:5] offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
+  %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 %in seq_cst
+  ret i32 %result
+}
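
For reference, the wrapping semantics exercised by the uinc_wrap and udec_wrap tests above are the LangRef definitions, which the backend maps directly onto the hardware inc/dec atomics seen in the checks (flat_atomic_inc/dec, buffer_atomic_inc/dec, global_atomic_inc/dec). A minimal sketch in C; the helper names are illustrative only and not part of this patch:

    #include <stdint.h>

    /* atomicrmw uinc_wrap: increment, wrapping back to 0 once the old
     * value reaches the operand (unsigned compare). */
    static uint32_t uinc_wrap(uint32_t old, uint32_t in) {
        return (old >= in) ? 0u : old + 1u;
    }

    /* atomicrmw udec_wrap: decrement, wrapping up to the operand when
     * the old value is 0 or already above the operand. */
    static uint32_t udec_wrap(uint32_t old, uint32_t in) {
        return (old == 0u || old > in) ? in : old - 1u;
    }

Because the GCN inc/dec atomic instructions implement exactly this behavior, these operations lower to single atomics at system scope rather than the cmpxchg expansion loops used for signed min/max below.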

diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_min_max_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_min_max_system.ll
deleted file mode 100644
index b9337304eb3ee2..00000000000000
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_min_max_system.ll
+++ /dev/null
@@ -1,3912 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -march=amdgcn -mcpu=bonaire -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=CI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-atomic-optimizer-strategy=None -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s
-
-define amdgpu_kernel void @atomic_max_i64_offset(ptr addrspace(1) %out, i64 %in) {
-; CI-LABEL: atomic_max_i64_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b64 s[4:5], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x8
-; CI-NEXT:    s_mov_b32 s6, s2
-; CI-NEXT:    s_mov_b32 s7, s3
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB0_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s7
-; CI-NEXT:    v_mov_b32_e32 v4, s6
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; CI-NEXT:    s_cbranch_execnz .LBB0_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b64 s[8:9], 0
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[10:11], s[0:1], 0x20
-; VI-NEXT:    s_add_u32 s4, s0, 32
-; VI-NEXT:    s_addc_u32 s5, s1, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s10
-; VI-NEXT:    v_mov_b32_e32 v3, s11
-; VI-NEXT:  .LBB0_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v4, s2
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_cbranch_execnz .LBB0_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX9-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x20
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s6
-; GFX9-NEXT:    v_mov_b32_e32 v3, s7
-; GFX9-NEXT:  .LBB0_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v5, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT:    s_cbranch_execnz .LBB0_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
-  %tmp0 = atomicrmw volatile max ptr addrspace(1) %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in) {
-; CI-LABEL: atomic_max_i64_ret_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
-; CI-NEXT:    s_mov_b64 s[10:11], 0
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x8
-; CI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s12
-; CI-NEXT:    v_mov_b32_e32 v3, s13
-; CI-NEXT:  .LBB1_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_i64_e32 vcc, s[8:9], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s9
-; CI-NEXT:    v_mov_b32_e32 v4, s8
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_cbranch_execnz .LBB1_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_mov_b32 s0, s6
-; CI-NEXT:    s_mov_b32 s1, s7
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i64_ret_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[10:11], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x20
-; VI-NEXT:    s_add_u32 s0, s4, 32
-; VI-NEXT:    s_addc_u32 s1, s5, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s12
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:  .LBB1_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[8:9], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    v_mov_b32_e32 v4, s8
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_cbranch_execnz .LBB1_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i64_ret_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x20
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB1_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB1_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
-  %tmp0 = atomicrmw volatile max ptr addrspace(1) %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64_addr64_offset(ptr addrspace(1) %out, i64 %in, i64 %index) {
-; CI-LABEL: atomic_max_i64_addr64_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; CI-NEXT:    s_add_u32 s4, s0, s4
-; CI-NEXT:    s_addc_u32 s5, s1, s5
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x8
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB2_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s3
-; CI-NEXT:    v_mov_b32_e32 v4, s2
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB2_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i64_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; VI-NEXT:    s_add_u32 s4, s0, s4
-; VI-NEXT:    s_addc_u32 s5, s1, s5
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x20
-; VI-NEXT:    s_add_u32 s4, s4, 32
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_addc_u32 s5, s5, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s8
-; VI-NEXT:    v_mov_b32_e32 v3, s9
-; VI-NEXT:  .LBB2_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v4, s2
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB2_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i64_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x20
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-NEXT:    v_mov_b32_e32 v3, s5
-; GFX9-NEXT:  .LBB2_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s7
-; GFX9-NEXT:    v_mov_b32_e32 v5, s6
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB2_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
-  %tmp0 = atomicrmw volatile max ptr addrspace(1) %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
-; CI-LABEL: atomic_max_i64_ret_addr64_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s11, 0xf000
-; CI-NEXT:    s_mov_b32 s10, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; CI-NEXT:    s_add_u32 s8, s0, s6
-; CI-NEXT:    s_addc_u32 s9, s1, s7
-; CI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x8
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s6
-; CI-NEXT:    v_mov_b32_e32 v3, s7
-; CI-NEXT:  .LBB3_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s5
-; CI-NEXT:    v_mov_b32_e32 v4, s4
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB3_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_mov_b32 s4, s2
-; CI-NEXT:    s_mov_b32 s5, s3
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i64_ret_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s11, 0xf000
-; VI-NEXT:    s_mov_b32 s10, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; VI-NEXT:    s_add_u32 s6, s0, s6
-; VI-NEXT:    s_addc_u32 s7, s1, s7
-; VI-NEXT:    s_load_dwordx2 s[12:13], s[6:7], 0x20
-; VI-NEXT:    s_add_u32 s8, s6, 32
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_addc_u32 s9, s7, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s12
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:  .LBB3_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    v_mov_b32_e32 v4, s4
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB3_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_mov_b32 s4, s2
-; VI-NEXT:    s_mov_b32 s5, s3
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i64_ret_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GFX9-NEXT:    s_add_u32 s0, s0, s6
-; GFX9-NEXT:    s_addc_u32 s1, s1, s7
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x20
-; GFX9-NEXT:    s_mov_b64 s[6:7], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB3_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s5
-; GFX9-NEXT:    v_mov_b32_e32 v1, s4
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    s_cbranch_execnz .LBB3_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
-  %tmp0 = atomicrmw volatile max ptr addrspace(1) %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64(ptr addrspace(1) %out, i64 %in) {
-; CI-LABEL: atomic_max_i64:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b64 s[4:5], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
-; CI-NEXT:    s_mov_b32 s6, s2
-; CI-NEXT:    s_mov_b32 s7, s3
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB4_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s7
-; CI-NEXT:    v_mov_b32_e32 v4, s6
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; CI-NEXT:    s_cbranch_execnz .LBB4_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
-; VI-NEXT:    s_mov_b32 s6, s2
-; VI-NEXT:    s_mov_b32 s7, s3
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s8
-; VI-NEXT:    v_mov_b32_e32 v3, s9
-; VI-NEXT:  .LBB4_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s7
-; VI-NEXT:    v_mov_b32_e32 v4, s6
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB4_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX9-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s6
-; GFX9-NEXT:    v_mov_b32_e32 v3, s7
-; GFX9-NEXT:  .LBB4_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v5, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT:    s_cbranch_execnz .LBB4_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile max ptr addrspace(1) %out, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in) {
-; CI-LABEL: atomic_max_i64_ret:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
-; CI-NEXT:    s_mov_b64 s[10:11], 0
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x0
-; CI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s12
-; CI-NEXT:    v_mov_b32_e32 v3, s13
-; CI-NEXT:  .LBB5_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_i64_e32 vcc, s[8:9], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s9
-; CI-NEXT:    v_mov_b32_e32 v4, s8
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_cbranch_execnz .LBB5_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_mov_b32 s0, s6
-; CI-NEXT:    s_mov_b32 s1, s7
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i64_ret:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[10:11], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s12
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:  .LBB5_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[8:9], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    v_mov_b32_e32 v4, s8
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_cbranch_execnz .LBB5_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i64_ret:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB5_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB5_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile max ptr addrspace(1) %out, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64_addr64(ptr addrspace(1) %out, i64 %in, i64 %index) {
-; CI-LABEL: atomic_max_i64_addr64:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; CI-NEXT:    s_add_u32 s4, s0, s4
-; CI-NEXT:    s_addc_u32 s5, s1, s5
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB6_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s3
-; CI-NEXT:    v_mov_b32_e32 v4, s2
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB6_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i64_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; VI-NEXT:    s_add_u32 s4, s0, s4
-; VI-NEXT:    s_addc_u32 s5, s1, s5
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s8
-; VI-NEXT:    v_mov_b32_e32 v3, s9
-; VI-NEXT:  .LBB6_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v4, s2
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB6_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i64_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-NEXT:    v_mov_b32_e32 v3, s5
-; GFX9-NEXT:  .LBB6_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s7
-; GFX9-NEXT:    v_mov_b32_e32 v5, s6
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB6_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %tmp0 = atomicrmw volatile max ptr addrspace(1) %ptr, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
-; CI-LABEL: atomic_max_i64_ret_addr64:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s11, 0xf000
-; CI-NEXT:    s_mov_b32 s10, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; CI-NEXT:    s_add_u32 s8, s0, s6
-; CI-NEXT:    s_addc_u32 s9, s1, s7
-; CI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x0
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s6
-; CI-NEXT:    v_mov_b32_e32 v3, s7
-; CI-NEXT:  .LBB7_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s5
-; CI-NEXT:    v_mov_b32_e32 v4, s4
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB7_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_mov_b32 s4, s2
-; CI-NEXT:    s_mov_b32 s5, s3
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i64_ret_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s11, 0xf000
-; VI-NEXT:    s_mov_b32 s10, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; VI-NEXT:    s_add_u32 s8, s0, s6
-; VI-NEXT:    s_addc_u32 s9, s1, s7
-; VI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s6
-; VI-NEXT:    v_mov_b32_e32 v3, s7
-; VI-NEXT:  .LBB7_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    v_mov_b32_e32 v4, s4
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB7_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_mov_b32 s4, s2
-; VI-NEXT:    s_mov_b32 s5, s3
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i64_ret_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GFX9-NEXT:    s_add_u32 s0, s0, s6
-; GFX9-NEXT:    s_addc_u32 s1, s1, s7
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[6:7], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB7_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s5
-; GFX9-NEXT:    v_mov_b32_e32 v1, s4
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    s_cbranch_execnz .LBB7_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %tmp0 = atomicrmw volatile max ptr addrspace(1) %ptr, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
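Every test in this removed file follows the same shape: rather than selecting a hardware min/max atomic, the backend rewrites the system-scope (no syncscope) atomicrmw into a compare-exchange loop, visible as the %atomicrmw.start/%atomicrmw.end blocks in the checks. A minimal IR sketch of that expansion for umax, assuming the generic shape AtomicExpand produces (function and value names here are illustrative, not taken from the diff):

define i64 @umax_cmpxchg_loop_sketch(ptr addrspace(1) %ptr, i64 %in) {
entry:
  %init = load i64, ptr addrspace(1) %ptr, align 8
  br label %atomicrmw.start
atomicrmw.start:
  %loaded = phi i64 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
  ; keep the unsigned-larger of the current value and the operand
  %cmp = icmp ugt i64 %loaded, %in
  %new = select i1 %cmp, i64 %loaded, i64 %in
  ; retry until the value in memory is the one we computed %new from
  %pair = cmpxchg ptr addrspace(1) %ptr, i64 %loaded, i64 %new seq_cst seq_cst
  %newloaded = extractvalue { i64, i1 } %pair, 0
  %success = extractvalue { i64, i1 } %pair, 1
  br i1 %success, label %atomicrmw.end, label %atomicrmw.start
atomicrmw.end:
  ret i64 %newloaded
}

In the checks, the v_cmp_lt_u64 / v_cndmask pairs are the compare-and-select, and buffer_atomic_cmpswap_x2 (global_atomic_cmpswap_x2 on GFX9) is the cmpxchg; the signed min tests further below use the same skeleton with the predicate flipped to v_cmp_ge_i64.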
-define amdgpu_kernel void @atomic_umax_i64_offset(ptr addrspace(1) %out, i64 %in) {
-; CI-LABEL: atomic_umax_i64_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b64 s[4:5], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x8
-; CI-NEXT:    s_mov_b32 s6, s2
-; CI-NEXT:    s_mov_b32 s7, s3
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB8_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s7
-; CI-NEXT:    v_mov_b32_e32 v4, s6
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; CI-NEXT:    s_cbranch_execnz .LBB8_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b64 s[8:9], 0
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[10:11], s[0:1], 0x20
-; VI-NEXT:    s_add_u32 s4, s0, 32
-; VI-NEXT:    s_addc_u32 s5, s1, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s10
-; VI-NEXT:    v_mov_b32_e32 v3, s11
-; VI-NEXT:  .LBB8_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v4, s2
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_cbranch_execnz .LBB8_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX9-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x20
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s6
-; GFX9-NEXT:    v_mov_b32_e32 v3, s7
-; GFX9-NEXT:  .LBB8_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v5, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT:    s_cbranch_execnz .LBB8_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
-  %tmp0 = atomicrmw volatile umax ptr addrspace(1) %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in) {
-; CI-LABEL: atomic_umax_i64_ret_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
-; CI-NEXT:    s_mov_b64 s[10:11], 0
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x8
-; CI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s12
-; CI-NEXT:    v_mov_b32_e32 v3, s13
-; CI-NEXT:  .LBB9_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s9
-; CI-NEXT:    v_mov_b32_e32 v4, s8
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_cbranch_execnz .LBB9_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_mov_b32 s0, s6
-; CI-NEXT:    s_mov_b32 s1, s7
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i64_ret_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[10:11], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x20
-; VI-NEXT:    s_add_u32 s0, s4, 32
-; VI-NEXT:    s_addc_u32 s1, s5, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s12
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:  .LBB9_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    v_mov_b32_e32 v4, s8
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_cbranch_execnz .LBB9_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i64_ret_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x20
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB9_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB9_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
-  %tmp0 = atomicrmw volatile umax ptr addrspace(1) %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64_addr64_offset(ptr addrspace(1) %out, i64 %in, i64 %index) {
-; CI-LABEL: atomic_umax_i64_addr64_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; CI-NEXT:    s_add_u32 s4, s0, s4
-; CI-NEXT:    s_addc_u32 s5, s1, s5
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x8
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB10_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s3
-; CI-NEXT:    v_mov_b32_e32 v4, s2
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB10_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i64_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; VI-NEXT:    s_add_u32 s4, s0, s4
-; VI-NEXT:    s_addc_u32 s5, s1, s5
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x20
-; VI-NEXT:    s_add_u32 s4, s4, 32
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_addc_u32 s5, s5, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s8
-; VI-NEXT:    v_mov_b32_e32 v3, s9
-; VI-NEXT:  .LBB10_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v4, s2
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB10_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i64_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x20
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-NEXT:    v_mov_b32_e32 v3, s5
-; GFX9-NEXT:  .LBB10_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s7
-; GFX9-NEXT:    v_mov_b32_e32 v5, s6
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB10_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
-  %tmp0 = atomicrmw volatile umax ptr addrspace(1) %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
-; CI-LABEL: atomic_umax_i64_ret_addr64_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s11, 0xf000
-; CI-NEXT:    s_mov_b32 s10, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; CI-NEXT:    s_add_u32 s8, s0, s6
-; CI-NEXT:    s_addc_u32 s9, s1, s7
-; CI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x8
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s6
-; CI-NEXT:    v_mov_b32_e32 v3, s7
-; CI-NEXT:  .LBB11_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s5
-; CI-NEXT:    v_mov_b32_e32 v4, s4
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB11_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_mov_b32 s4, s2
-; CI-NEXT:    s_mov_b32 s5, s3
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i64_ret_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s11, 0xf000
-; VI-NEXT:    s_mov_b32 s10, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; VI-NEXT:    s_add_u32 s6, s0, s6
-; VI-NEXT:    s_addc_u32 s7, s1, s7
-; VI-NEXT:    s_load_dwordx2 s[12:13], s[6:7], 0x20
-; VI-NEXT:    s_add_u32 s8, s6, 32
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_addc_u32 s9, s7, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s12
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:  .LBB11_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    v_mov_b32_e32 v4, s4
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB11_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_mov_b32 s4, s2
-; VI-NEXT:    s_mov_b32 s5, s3
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i64_ret_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GFX9-NEXT:    s_add_u32 s0, s0, s6
-; GFX9-NEXT:    s_addc_u32 s1, s1, s7
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x20
-; GFX9-NEXT:    s_mov_b64 s[6:7], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB11_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s5
-; GFX9-NEXT:    v_mov_b32_e32 v1, s4
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    s_cbranch_execnz .LBB11_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
-  %tmp0 = atomicrmw volatile umax ptr addrspace(1) %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64(ptr addrspace(1) %out, i64 %in) {
-; CI-LABEL: atomic_umax_i64:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b64 s[4:5], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
-; CI-NEXT:    s_mov_b32 s6, s2
-; CI-NEXT:    s_mov_b32 s7, s3
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB12_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s7
-; CI-NEXT:    v_mov_b32_e32 v4, s6
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; CI-NEXT:    s_cbranch_execnz .LBB12_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
-; VI-NEXT:    s_mov_b32 s6, s2
-; VI-NEXT:    s_mov_b32 s7, s3
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s8
-; VI-NEXT:    v_mov_b32_e32 v3, s9
-; VI-NEXT:  .LBB12_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s7
-; VI-NEXT:    v_mov_b32_e32 v4, s6
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB12_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX9-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s6
-; GFX9-NEXT:    v_mov_b32_e32 v3, s7
-; GFX9-NEXT:  .LBB12_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v5, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT:    s_cbranch_execnz .LBB12_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile umax ptr addrspace(1) %out, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in) {
-; CI-LABEL: atomic_umax_i64_ret:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
-; CI-NEXT:    s_mov_b64 s[10:11], 0
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x0
-; CI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s12
-; CI-NEXT:    v_mov_b32_e32 v3, s13
-; CI-NEXT:  .LBB13_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s9
-; CI-NEXT:    v_mov_b32_e32 v4, s8
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_cbranch_execnz .LBB13_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_mov_b32 s0, s6
-; CI-NEXT:    s_mov_b32 s1, s7
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i64_ret:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[10:11], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s12
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:  .LBB13_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    v_mov_b32_e32 v4, s8
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_cbranch_execnz .LBB13_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i64_ret:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB13_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB13_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile umax ptr addrspace(1) %out, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64_addr64(ptr addrspace(1) %out, i64 %in, i64 %index) {
-; CI-LABEL: atomic_umax_i64_addr64:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; CI-NEXT:    s_add_u32 s4, s0, s4
-; CI-NEXT:    s_addc_u32 s5, s1, s5
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB14_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s3
-; CI-NEXT:    v_mov_b32_e32 v4, s2
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB14_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i64_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; VI-NEXT:    s_add_u32 s4, s0, s4
-; VI-NEXT:    s_addc_u32 s5, s1, s5
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s8
-; VI-NEXT:    v_mov_b32_e32 v3, s9
-; VI-NEXT:  .LBB14_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v4, s2
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB14_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i64_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-NEXT:    v_mov_b32_e32 v3, s5
-; GFX9-NEXT:  .LBB14_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s7
-; GFX9-NEXT:    v_mov_b32_e32 v5, s6
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB14_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %tmp0 = atomicrmw volatile umax ptr addrspace(1) %ptr, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
-; CI-LABEL: atomic_umax_i64_ret_addr64:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s11, 0xf000
-; CI-NEXT:    s_mov_b32 s10, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; CI-NEXT:    s_add_u32 s8, s0, s6
-; CI-NEXT:    s_addc_u32 s9, s1, s7
-; CI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x0
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s6
-; CI-NEXT:    v_mov_b32_e32 v3, s7
-; CI-NEXT:  .LBB15_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s5
-; CI-NEXT:    v_mov_b32_e32 v4, s4
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB15_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_mov_b32 s4, s2
-; CI-NEXT:    s_mov_b32 s5, s3
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i64_ret_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s11, 0xf000
-; VI-NEXT:    s_mov_b32 s10, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; VI-NEXT:    s_add_u32 s8, s0, s6
-; VI-NEXT:    s_addc_u32 s9, s1, s7
-; VI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s6
-; VI-NEXT:    v_mov_b32_e32 v3, s7
-; VI-NEXT:  .LBB15_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    v_mov_b32_e32 v4, s4
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB15_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_mov_b32 s4, s2
-; VI-NEXT:    s_mov_b32 s5, s3
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i64_ret_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GFX9-NEXT:    s_add_u32 s0, s0, s6
-; GFX9-NEXT:    s_addc_u32 s1, s1, s7
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[6:7], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB15_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s5
-; GFX9-NEXT:    v_mov_b32_e32 v1, s4
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    s_cbranch_execnz .LBB15_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %tmp0 = atomicrmw volatile umax ptr addrspace(1) %ptr, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
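The tail of each loop is the standard divergent-loop exit idiom rather than a uniform branch: each lane retries until its own cmpxchg succeeds. The shape, abridged from the VI block of atomic_umax_i64_ret_addr64 above (the interleaved v_mov copies are elided; comments added):

v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]   ; returned value == expected, so this lane's cmpxchg took effect
s_or_b64 s[0:1], vcc, s[0:1]           ; accumulate the set of finished lanes
s_andn2_b64 exec, exec, s[0:1]         ; mask finished lanes out of exec
s_cbranch_execnz .LBB15_1              ; loop while any lane is still live
s_or_b64 exec, exec, s[0:1]            ; after the loop, restore the full exec mask

The min tests that follow reuse this skeleton with the comparison inverted (v_cmp_ge_i64 with the incoming operand on the left), so the loaded value survives the select when it is already the signed minimum.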
-define amdgpu_kernel void @atomic_min_i64_offset(ptr addrspace(1) %out, i64 %in) {
-; CI-LABEL: atomic_min_i64_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b64 s[4:5], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x8
-; CI-NEXT:    s_mov_b32 s6, s2
-; CI-NEXT:    s_mov_b32 s7, s3
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB16_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s7
-; CI-NEXT:    v_mov_b32_e32 v4, s6
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; CI-NEXT:    s_cbranch_execnz .LBB16_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b64 s[8:9], 0
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[10:11], s[0:1], 0x20
-; VI-NEXT:    s_add_u32 s4, s0, 32
-; VI-NEXT:    s_addc_u32 s5, s1, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s10
-; VI-NEXT:    v_mov_b32_e32 v3, s11
-; VI-NEXT:  .LBB16_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v4, s2
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_cbranch_execnz .LBB16_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX9-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x20
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s6
-; GFX9-NEXT:    v_mov_b32_e32 v3, s7
-; GFX9-NEXT:  .LBB16_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v5, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT:    s_cbranch_execnz .LBB16_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
-  %tmp0 = atomicrmw volatile min ptr addrspace(1) %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in) {
-; CI-LABEL: atomic_min_i64_ret_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
-; CI-NEXT:    s_mov_b64 s[10:11], 0
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x8
-; CI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s12
-; CI-NEXT:    v_mov_b32_e32 v3, s13
-; CI-NEXT:  .LBB17_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_i64_e32 vcc, s[8:9], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s9
-; CI-NEXT:    v_mov_b32_e32 v4, s8
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_cbranch_execnz .LBB17_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_mov_b32 s0, s6
-; CI-NEXT:    s_mov_b32 s1, s7
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i64_ret_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[10:11], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x20
-; VI-NEXT:    s_add_u32 s0, s4, 32
-; VI-NEXT:    s_addc_u32 s1, s5, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s12
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:  .LBB17_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[8:9], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    v_mov_b32_e32 v4, s8
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_cbranch_execnz .LBB17_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i64_ret_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x20
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB17_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB17_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
-  %tmp0 = atomicrmw volatile min ptr addrspace(1) %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64_addr64_offset(ptr addrspace(1) %out, i64 %in, i64 %index) {
-; CI-LABEL: atomic_min_i64_addr64_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; CI-NEXT:    s_add_u32 s4, s0, s4
-; CI-NEXT:    s_addc_u32 s5, s1, s5
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x8
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB18_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s3
-; CI-NEXT:    v_mov_b32_e32 v4, s2
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB18_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i64_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; VI-NEXT:    s_add_u32 s4, s0, s4
-; VI-NEXT:    s_addc_u32 s5, s1, s5
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x20
-; VI-NEXT:    s_add_u32 s4, s4, 32
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_addc_u32 s5, s5, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s8
-; VI-NEXT:    v_mov_b32_e32 v3, s9
-; VI-NEXT:  .LBB18_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v4, s2
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB18_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i64_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x20
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-NEXT:    v_mov_b32_e32 v3, s5
-; GFX9-NEXT:  .LBB18_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s7
-; GFX9-NEXT:    v_mov_b32_e32 v5, s6
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB18_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
-  %tmp0 = atomicrmw volatile min ptr addrspace(1) %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
-; CI-LABEL: atomic_min_i64_ret_addr64_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s11, 0xf000
-; CI-NEXT:    s_mov_b32 s10, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; CI-NEXT:    s_add_u32 s8, s0, s6
-; CI-NEXT:    s_addc_u32 s9, s1, s7
-; CI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x8
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s6
-; CI-NEXT:    v_mov_b32_e32 v3, s7
-; CI-NEXT:  .LBB19_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s5
-; CI-NEXT:    v_mov_b32_e32 v4, s4
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB19_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_mov_b32 s4, s2
-; CI-NEXT:    s_mov_b32 s5, s3
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i64_ret_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s11, 0xf000
-; VI-NEXT:    s_mov_b32 s10, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; VI-NEXT:    s_add_u32 s6, s0, s6
-; VI-NEXT:    s_addc_u32 s7, s1, s7
-; VI-NEXT:    s_load_dwordx2 s[12:13], s[6:7], 0x20
-; VI-NEXT:    s_add_u32 s8, s6, 32
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_addc_u32 s9, s7, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s12
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:  .LBB19_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    v_mov_b32_e32 v4, s4
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB19_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_mov_b32 s4, s2
-; VI-NEXT:    s_mov_b32 s5, s3
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i64_ret_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GFX9-NEXT:    s_add_u32 s0, s0, s6
-; GFX9-NEXT:    s_addc_u32 s1, s1, s7
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x20
-; GFX9-NEXT:    s_mov_b64 s[6:7], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB19_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s5
-; GFX9-NEXT:    v_mov_b32_e32 v1, s4
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    s_cbranch_execnz .LBB19_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
-  %tmp0 = atomicrmw volatile min ptr addrspace(1) %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64(ptr addrspace(1) %out, i64 %in) {
-; CI-LABEL: atomic_min_i64:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b64 s[4:5], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
-; CI-NEXT:    s_mov_b32 s6, s2
-; CI-NEXT:    s_mov_b32 s7, s3
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB20_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s7
-; CI-NEXT:    v_mov_b32_e32 v4, s6
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; CI-NEXT:    s_cbranch_execnz .LBB20_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
-; VI-NEXT:    s_mov_b32 s6, s2
-; VI-NEXT:    s_mov_b32 s7, s3
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s8
-; VI-NEXT:    v_mov_b32_e32 v3, s9
-; VI-NEXT:  .LBB20_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s7
-; VI-NEXT:    v_mov_b32_e32 v4, s6
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB20_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX9-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s6
-; GFX9-NEXT:    v_mov_b32_e32 v3, s7
-; GFX9-NEXT:  .LBB20_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v5, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT:    s_cbranch_execnz .LBB20_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile min ptr addrspace(1) %out, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in) {
-; CI-LABEL: atomic_min_i64_ret:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
-; CI-NEXT:    s_mov_b64 s[10:11], 0
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x0
-; CI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s12
-; CI-NEXT:    v_mov_b32_e32 v3, s13
-; CI-NEXT:  .LBB21_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_i64_e32 vcc, s[8:9], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s9
-; CI-NEXT:    v_mov_b32_e32 v4, s8
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_cbranch_execnz .LBB21_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_mov_b32 s0, s6
-; CI-NEXT:    s_mov_b32 s1, s7
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i64_ret:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[10:11], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s12
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:  .LBB21_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[8:9], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    v_mov_b32_e32 v4, s8
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_cbranch_execnz .LBB21_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i64_ret:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB21_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB21_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile min ptr addrspace(1) %out, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64_addr64(ptr addrspace(1) %out, i64 %in, i64 %index) {
-; CI-LABEL: atomic_min_i64_addr64:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; CI-NEXT:    s_add_u32 s4, s0, s4
-; CI-NEXT:    s_addc_u32 s5, s1, s5
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB22_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s3
-; CI-NEXT:    v_mov_b32_e32 v4, s2
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB22_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i64_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; VI-NEXT:    s_add_u32 s4, s0, s4
-; VI-NEXT:    s_addc_u32 s5, s1, s5
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s8
-; VI-NEXT:    v_mov_b32_e32 v3, s9
-; VI-NEXT:  .LBB22_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v4, s2
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB22_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i64_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-NEXT:    v_mov_b32_e32 v3, s5
-; GFX9-NEXT:  .LBB22_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s7
-; GFX9-NEXT:    v_mov_b32_e32 v5, s6
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB22_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %tmp0 = atomicrmw volatile min ptr addrspace(1) %ptr, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
-; CI-LABEL: atomic_min_i64_ret_addr64:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s11, 0xf000
-; CI-NEXT:    s_mov_b32 s10, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; CI-NEXT:    s_add_u32 s8, s0, s6
-; CI-NEXT:    s_addc_u32 s9, s1, s7
-; CI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x0
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s6
-; CI-NEXT:    v_mov_b32_e32 v3, s7
-; CI-NEXT:  .LBB23_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s5
-; CI-NEXT:    v_mov_b32_e32 v4, s4
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB23_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_mov_b32 s4, s2
-; CI-NEXT:    s_mov_b32 s5, s3
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i64_ret_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s11, 0xf000
-; VI-NEXT:    s_mov_b32 s10, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; VI-NEXT:    s_add_u32 s8, s0, s6
-; VI-NEXT:    s_addc_u32 s9, s1, s7
-; VI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s6
-; VI-NEXT:    v_mov_b32_e32 v3, s7
-; VI-NEXT:  .LBB23_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    v_mov_b32_e32 v4, s4
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB23_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_mov_b32 s4, s2
-; VI-NEXT:    s_mov_b32 s5, s3
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i64_ret_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GFX9-NEXT:    s_add_u32 s0, s0, s6
-; GFX9-NEXT:    s_addc_u32 s1, s1, s7
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[6:7], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB23_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s5
-; GFX9-NEXT:    v_mov_b32_e32 v1, s4
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    s_cbranch_execnz .LBB23_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %tmp0 = atomicrmw volatile min ptr addrspace(1) %ptr, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_offset(ptr addrspace(1) %out, i64 %in) {
-; CI-LABEL: atomic_umin_i64_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b64 s[4:5], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x8
-; CI-NEXT:    s_mov_b32 s6, s2
-; CI-NEXT:    s_mov_b32 s7, s3
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB24_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s7
-; CI-NEXT:    v_mov_b32_e32 v4, s6
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; CI-NEXT:    s_cbranch_execnz .LBB24_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b64 s[8:9], 0
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[10:11], s[0:1], 0x20
-; VI-NEXT:    s_add_u32 s4, s0, 32
-; VI-NEXT:    s_addc_u32 s5, s1, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s10
-; VI-NEXT:    v_mov_b32_e32 v3, s11
-; VI-NEXT:  .LBB24_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v4, s2
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_cbranch_execnz .LBB24_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX9-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x20
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s6
-; GFX9-NEXT:    v_mov_b32_e32 v3, s7
-; GFX9-NEXT:  .LBB24_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v5, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT:    s_cbranch_execnz .LBB24_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
-  %tmp0 = atomicrmw volatile umin ptr addrspace(1) %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in) {
-; CI-LABEL: atomic_umin_i64_ret_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
-; CI-NEXT:    s_mov_b64 s[10:11], 0
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x8
-; CI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s12
-; CI-NEXT:    v_mov_b32_e32 v3, s13
-; CI-NEXT:  .LBB25_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_u64_e32 vcc, s[8:9], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s9
-; CI-NEXT:    v_mov_b32_e32 v4, s8
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_cbranch_execnz .LBB25_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_mov_b32 s0, s6
-; CI-NEXT:    s_mov_b32 s1, s7
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i64_ret_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[10:11], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x20
-; VI-NEXT:    s_add_u32 s0, s4, 32
-; VI-NEXT:    s_addc_u32 s1, s5, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s12
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:  .LBB25_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_u64_e32 vcc, s[8:9], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    v_mov_b32_e32 v4, s8
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_cbranch_execnz .LBB25_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i64_ret_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x20
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB25_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB25_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
-  %tmp0 = atomicrmw volatile umin ptr addrspace(1) %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_addr64_offset(ptr addrspace(1) %out, i64 %in, i64 %index) {
-; CI-LABEL: atomic_umin_i64_addr64_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; CI-NEXT:    s_add_u32 s4, s0, s4
-; CI-NEXT:    s_addc_u32 s5, s1, s5
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x8
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB26_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s3
-; CI-NEXT:    v_mov_b32_e32 v4, s2
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB26_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i64_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; VI-NEXT:    s_add_u32 s4, s0, s4
-; VI-NEXT:    s_addc_u32 s5, s1, s5
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x20
-; VI-NEXT:    s_add_u32 s4, s4, 32
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_addc_u32 s5, s5, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s8
-; VI-NEXT:    v_mov_b32_e32 v3, s9
-; VI-NEXT:  .LBB26_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v4, s2
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB26_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i64_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x20
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-NEXT:    v_mov_b32_e32 v3, s5
-; GFX9-NEXT:  .LBB26_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s7
-; GFX9-NEXT:    v_mov_b32_e32 v5, s6
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB26_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
-  %tmp0 = atomicrmw volatile umin ptr addrspace(1) %gep, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
-; CI-LABEL: atomic_umin_i64_ret_addr64_offset:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s11, 0xf000
-; CI-NEXT:    s_mov_b32 s10, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; CI-NEXT:    s_add_u32 s8, s0, s6
-; CI-NEXT:    s_addc_u32 s9, s1, s7
-; CI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x8
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s6
-; CI-NEXT:    v_mov_b32_e32 v3, s7
-; CI-NEXT:  .LBB27_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s5
-; CI-NEXT:    v_mov_b32_e32 v4, s4
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB27_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_mov_b32 s4, s2
-; CI-NEXT:    s_mov_b32 s5, s3
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i64_ret_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s11, 0xf000
-; VI-NEXT:    s_mov_b32 s10, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; VI-NEXT:    s_add_u32 s6, s0, s6
-; VI-NEXT:    s_addc_u32 s7, s1, s7
-; VI-NEXT:    s_load_dwordx2 s[12:13], s[6:7], 0x20
-; VI-NEXT:    s_add_u32 s8, s6, 32
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_addc_u32 s9, s7, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s12
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:  .LBB27_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    v_mov_b32_e32 v4, s4
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB27_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_mov_b32 s4, s2
-; VI-NEXT:    s_mov_b32 s5, s3
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i64_ret_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GFX9-NEXT:    s_add_u32 s0, s0, s6
-; GFX9-NEXT:    s_addc_u32 s1, s1, s7
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x20
-; GFX9-NEXT:    s_mov_b64 s[6:7], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB27_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_ge_u64_e32 vcc, s[4:5], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s5
-; GFX9-NEXT:    v_mov_b32_e32 v1, s4
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    s_cbranch_execnz .LBB27_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
-  %tmp0 = atomicrmw volatile umin ptr addrspace(1) %gep, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64(ptr addrspace(1) %out, i64 %in) {
-; CI-LABEL: atomic_umin_i64:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b64 s[4:5], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
-; CI-NEXT:    s_mov_b32 s6, s2
-; CI-NEXT:    s_mov_b32 s7, s3
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB28_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s7
-; CI-NEXT:    v_mov_b32_e32 v4, s6
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; CI-NEXT:    s_cbranch_execnz .LBB28_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
-; VI-NEXT:    s_mov_b32 s6, s2
-; VI-NEXT:    s_mov_b32 s7, s3
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s8
-; VI-NEXT:    v_mov_b32_e32 v3, s9
-; VI-NEXT:  .LBB28_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s7
-; VI-NEXT:    v_mov_b32_e32 v4, s6
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB28_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX9-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s6
-; GFX9-NEXT:    v_mov_b32_e32 v3, s7
-; GFX9-NEXT:  .LBB28_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v5, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT:    s_cbranch_execnz .LBB28_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile umin ptr addrspace(1) %out, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in) {
-; CI-LABEL: atomic_umin_i64_ret:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
-; CI-NEXT:    s_mov_b64 s[10:11], 0
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x0
-; CI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s12
-; CI-NEXT:    v_mov_b32_e32 v3, s13
-; CI-NEXT:  .LBB29_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_u64_e32 vcc, s[8:9], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s9
-; CI-NEXT:    v_mov_b32_e32 v4, s8
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_cbranch_execnz .LBB29_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; CI-NEXT:    s_mov_b32 s3, 0xf000
-; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    s_mov_b32 s0, s6
-; CI-NEXT:    s_mov_b32 s1, s7
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i64_ret:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[10:11], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s12
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:  .LBB29_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_u64_e32 vcc, s[8:9], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    v_mov_b32_e32 v4, s8
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_cbranch_execnz .LBB29_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[10:11]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i64_ret:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB29_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB29_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %tmp0 = atomicrmw volatile umin ptr addrspace(1) %out, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_addr64(ptr addrspace(1) %out, i64 %in, i64 %index) {
-; CI-LABEL: atomic_umin_i64_addr64:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
-; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; CI-NEXT:    s_add_u32 s4, s0, s4
-; CI-NEXT:    s_addc_u32 s5, s1, s5
-; CI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s8
-; CI-NEXT:    v_mov_b32_e32 v3, s9
-; CI-NEXT:  .LBB30_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s3
-; CI-NEXT:    v_mov_b32_e32 v4, s2
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB30_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i64_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
-; VI-NEXT:    s_add_u32 s4, s0, s4
-; VI-NEXT:    s_addc_u32 s5, s1, s5
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s8
-; VI-NEXT:    v_mov_b32_e32 v3, s9
-; VI-NEXT:  .LBB30_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v4, s2
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB30_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i64_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-NEXT:    v_mov_b32_e32 v3, s5
-; GFX9-NEXT:  .LBB30_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s7
-; GFX9-NEXT:    v_mov_b32_e32 v5, s6
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB30_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %tmp0 = atomicrmw volatile umin ptr addrspace(1) %ptr, i64 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
-; CI-LABEL: atomic_umin_i64_ret_addr64:
-; CI:       ; %bb.0: ; %entry
-; CI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s11, 0xf000
-; CI-NEXT:    s_mov_b32 s10, -1
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; CI-NEXT:    s_add_u32 s8, s0, s6
-; CI-NEXT:    s_addc_u32 s9, s1, s7
-; CI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x0
-; CI-NEXT:    s_mov_b64 s[0:1], 0
-; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_mov_b32_e32 v2, s6
-; CI-NEXT:    v_mov_b32_e32 v3, s7
-; CI-NEXT:  .LBB31_1: ; %atomicrmw.start
-; CI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CI-NEXT:    v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v0, s5
-; CI-NEXT:    v_mov_b32_e32 v4, s4
-; CI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; CI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v7, v3
-; CI-NEXT:    v_mov_b32_e32 v6, v2
-; CI-NEXT:    v_mov_b32_e32 v5, v1
-; CI-NEXT:    v_mov_b32_e32 v4, v0
-; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
-; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    buffer_wbinvl1_vol
-; CI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; CI-NEXT:    v_mov_b32_e32 v2, v4
-; CI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT:    v_mov_b32_e32 v3, v5
-; CI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_cbranch_execnz .LBB31_1
-; CI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; CI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; CI-NEXT:    s_mov_b32 s7, 0xf000
-; CI-NEXT:    s_mov_b32 s6, -1
-; CI-NEXT:    s_mov_b32 s4, s2
-; CI-NEXT:    s_mov_b32 s5, s3
-; CI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; CI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i64_ret_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s11, 0xf000
-; VI-NEXT:    s_mov_b32 s10, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; VI-NEXT:    s_add_u32 s8, s0, s6
-; VI-NEXT:    s_addc_u32 s9, s1, s7
-; VI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s6
-; VI-NEXT:    v_mov_b32_e32 v3, s7
-; VI-NEXT:  .LBB31_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    v_mov_b32_e32 v4, s4
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, v3
-; VI-NEXT:    v_mov_b32_e32 v6, v2
-; VI-NEXT:    v_mov_b32_e32 v5, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v2, v4
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, v5
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB31_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_mov_b32 s4, s2
-; VI-NEXT:    s_mov_b32 s5, s3
-; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i64_ret_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
-; GFX9-NEXT:    s_add_u32 s0, s0, s6
-; GFX9-NEXT:    s_addc_u32 s1, s1, s7
-; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[6:7], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v1, s9
-; GFX9-NEXT:  .LBB31_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v6, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, v0
-; GFX9-NEXT:    v_cmp_ge_u64_e32 vcc, s[4:5], v[5:6]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s5
-; GFX9-NEXT:    v_mov_b32_e32 v1, s4
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    s_cbranch_execnz .LBB31_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
-  %tmp0 = atomicrmw volatile umin ptr addrspace(1) %ptr, i64 %in seq_cst
-  store i64 %tmp0, ptr addrspace(1) %out2
-  ret void
-}

diff  --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
new file mode 100644
index 00000000000000..9120b4e48dbb06
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
@@ -0,0 +1,8536 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s
+
+; ---------------------------------------------------------------------
+; atomicrmw xchg
+; ---------------------------------------------------------------------
+
+define void @global_atomic_xchg_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_xchg_i64_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i64_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i64_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap_x2 v[0:1], v[2:3], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xchg ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_xchg_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_xchg_i64_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i64_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i64_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap_x2 v[0:1], v[2:3], off offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw xchg ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @global_atomic_xchg_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_xchg_i64_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i64_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i64_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xchg ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @global_atomic_xchg_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_xchg_i64_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i64_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i64_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw xchg ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @global_atomic_xchg_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_xchg_i64_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    v_mov_b32_e32 v2, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap_x2 v[1:2], off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i64_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i64_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap_x2 v2, v[0:1], s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xchg ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_xchg_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_xchg_i64_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    v_mov_b32_e32 v2, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap_x2 v[1:2], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i64_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i64_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap_x2 v2, v[0:1], s[4:5] offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw xchg ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @global_atomic_xchg_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_xchg_i64_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap_x2 v[0:1], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i64_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i64_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap_x2 v[0:1], v2, v[0:1], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xchg ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @global_atomic_xchg_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_xchg_i64_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_swap_x2 v[0:1], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xchg_i64_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xchg_i64_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_swap_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw xchg ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw add
+; ---------------------------------------------------------------------
+
+define void @global_atomic_add_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_add_i64_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add_x2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i64_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i64_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add_x2 v[0:1], v[2:3], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw add ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_add_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_add_i64_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i64_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i64_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add_x2 v[0:1], v[2:3], off offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw add ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @global_atomic_add_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_add_i64_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i64_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i64_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add_x2 v[0:1], v[0:1], v[2:3], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw add ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @global_atomic_add_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_add_i64_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i64_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i64_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw add ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @global_atomic_add_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_add_i64_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    v_mov_b32_e32 v2, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add_x2 v[1:2], off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i64_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i64_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add_x2 v2, v[0:1], s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw add ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_add_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_add_i64_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    v_mov_b32_e32 v2, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add_x2 v[1:2], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i64_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i64_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add_x2 v2, v[0:1], s[4:5] offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw add ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @global_atomic_add_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_add_i64_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add_x2 v[0:1], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i64_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i64_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add_x2 v[0:1], v2, v[0:1], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw add ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @global_atomic_add_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_add_i64_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_add_x2 v[0:1], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_add_i64_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_add_i64_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_add_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw add ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw sub
+; ---------------------------------------------------------------------
+
+define void @global_atomic_sub_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_sub_i64_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub_x2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i64_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i64_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub_x2 v[0:1], v[2:3], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw sub ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_sub_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_sub_i64_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i64_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i64_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub_x2 v[0:1], v[2:3], off offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw sub ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @global_atomic_sub_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_sub_i64_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i64_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i64_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub_x2 v[0:1], v[0:1], v[2:3], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw sub ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @global_atomic_sub_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_sub_i64_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i64_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i64_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw sub ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @global_atomic_sub_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_sub_i64_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    v_mov_b32_e32 v2, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub_x2 v[1:2], off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i64_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i64_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub_x2 v2, v[0:1], s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw sub ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_sub_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_sub_i64_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    v_mov_b32_e32 v2, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub_x2 v[1:2], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i64_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i64_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub_x2 v2, v[0:1], s[4:5] offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw sub ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @global_atomic_sub_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_sub_i64_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub_x2 v[0:1], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i64_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i64_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub_x2 v[0:1], v2, v[0:1], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw sub ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @global_atomic_sub_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_sub_i64_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_sub_x2 v[0:1], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_sub_i64_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_sub_i64_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_sub_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw sub ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw and
+; ---------------------------------------------------------------------
+
+define void @global_atomic_and_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_and_i64_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and_x2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i64_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i64_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and_x2 v[0:1], v[2:3], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw and ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_and_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_and_i64_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i64_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i64_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and_x2 v[0:1], v[2:3], off offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw and ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @global_atomic_and_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_and_i64_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i64_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i64_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and_x2 v[0:1], v[0:1], v[2:3], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw and ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @global_atomic_and_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_and_i64_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i64_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i64_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw and ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @global_atomic_and_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_and_i64_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    v_mov_b32_e32 v2, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and_x2 v[1:2], off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i64_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i64_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and_x2 v2, v[0:1], s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw and ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_and_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_and_i64_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    v_mov_b32_e32 v2, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and_x2 v[1:2], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i64_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i64_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and_x2 v2, v[0:1], s[4:5] offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw and ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @global_atomic_and_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_and_i64_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and_x2 v[0:1], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i64_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i64_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and_x2 v[0:1], v2, v[0:1], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw and ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @global_atomic_and_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_and_i64_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_and_x2 v[0:1], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_and_i64_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_and_i64_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_and_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw and ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw nand
+; ---------------------------------------------------------------------
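+; Note: unlike and/or/xor, there is no native nand atomic on these targets,
+; so AtomicExpand lowers the atomicrmw to a compare-exchange loop; that is
+; where the .LBB*_1 "%atomicrmw.start" blocks in the checks below come from.
+; A minimal sketch of the expanded IR, assuming a seq_cst i64 nand in
+; addrspace(1) (value names are illustrative, not taken from the checks):
+;
+;     %old = load i64, ptr addrspace(1) %ptr
+;     br label %atomicrmw.start
+;   atomicrmw.start:
+;     %loaded = phi i64 [ %old, %entry ], [ %newloaded, %atomicrmw.start ]
+;     %tmp = and i64 %loaded, %in
+;     %new = xor i64 %tmp, -1          ; nand(a, b) = not(a & b)
+;     %pair = cmpxchg ptr addrspace(1) %ptr, i64 %loaded, i64 %new seq_cst seq_cst
+;     %newloaded = extractvalue { i64, i1 } %pair, 0
+;     %success = extractvalue { i64, i1 } %pair, 1
+;     br i1 %success, label %atomicrmw.end, label %atomicrmw.start
+;   atomicrmw.end: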
+
+define void @global_atomic_nand_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_nand_i64_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB32_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v4, v7, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v8, v6, v2
+; SI-NEXT:    v_not_b32_e32 v5, v4
+; SI-NEXT:    v_not_b32_e32 v4, v8
+; SI-NEXT:    v_mov_b32_e32 v11, v7
+; SI-NEXT:    v_mov_b32_e32 v10, v6
+; SI-NEXT:    v_mov_b32_e32 v9, v5
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    v_mov_b32_e32 v7, v9
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB32_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i64_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB32_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v4, v7, v3
+; VI-NEXT:    v_and_b32_e32 v8, v6, v2
+; VI-NEXT:    v_not_b32_e32 v5, v4
+; VI-NEXT:    v_not_b32_e32 v4, v8
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB32_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i64_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB32_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v4, v7, v3
+; GFX9-NEXT:    v_and_b32_e32 v8, v6, v2
+; GFX9-NEXT:    v_not_b32_e32 v5, v4
+; GFX9-NEXT:    v_not_b32_e32 v4, v8
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB32_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw nand ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_nand_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_nand_i64_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB33_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v4, v7, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v8, v6, v2
+; SI-NEXT:    v_not_b32_e32 v5, v4
+; SI-NEXT:    v_not_b32_e32 v4, v8
+; SI-NEXT:    v_mov_b32_e32 v11, v7
+; SI-NEXT:    v_mov_b32_e32 v10, v6
+; SI-NEXT:    v_mov_b32_e32 v9, v5
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    v_mov_b32_e32 v7, v9
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB33_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i64_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB33_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v4, v7, v3
+; VI-NEXT:    v_and_b32_e32 v8, v6, v2
+; VI-NEXT:    v_not_b32_e32 v5, v4
+; VI-NEXT:    v_not_b32_e32 v4, v8
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB33_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i64_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[0:1], off offset:32
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB33_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v4, v7, v3
+; GFX9-NEXT:    v_and_b32_e32 v8, v6, v2
+; GFX9-NEXT:    v_not_b32_e32 v5, v4
+; GFX9-NEXT:    v_not_b32_e32 v4, v8
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB33_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw nand ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_nand_i64_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v3
+; SI-NEXT:    v_mov_b32_e32 v5, v2
+; SI-NEXT:    v_mov_b32_e32 v7, v1
+; SI-NEXT:    v_mov_b32_e32 v6, v0
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB34_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v0, v11, v4
+; SI-NEXT:    v_and_b32_e32 v1, v10, v5
+; SI-NEXT:    v_not_b32_e32 v9, v0
+; SI-NEXT:    v_not_b32_e32 v8, v1
+; SI-NEXT:    v_mov_b32_e32 v0, v8
+; SI-NEXT:    v_mov_b32_e32 v1, v9
+; SI-NEXT:    v_mov_b32_e32 v2, v10
+; SI-NEXT:    v_mov_b32_e32 v3, v11
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB34_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i64_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[4:5], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB34_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    v_and_b32_e32 v4, v7, v3
+; VI-NEXT:    v_and_b32_e32 v8, v6, v2
+; VI-NEXT:    v_not_b32_e32 v5, v4
+; VI-NEXT:    v_not_b32_e32 v4, v8
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB34_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, v4
+; VI-NEXT:    v_mov_b32_e32 v1, v5
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i64_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB34_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    v_and_b32_e32 v4, v7, v3
+; GFX9-NEXT:    v_and_b32_e32 v8, v6, v2
+; GFX9-NEXT:    v_not_b32_e32 v5, v4
+; GFX9-NEXT:    v_not_b32_e32 v4, v8
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB34_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-NEXT:    v_mov_b32_e32 v1, v5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw nand ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_nand_i64_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, v3
+; SI-NEXT:    v_mov_b32_e32 v5, v2
+; SI-NEXT:    v_mov_b32_e32 v7, v1
+; SI-NEXT:    v_mov_b32_e32 v6, v0
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB35_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v0, v11, v4
+; SI-NEXT:    v_and_b32_e32 v1, v10, v5
+; SI-NEXT:    v_not_b32_e32 v9, v0
+; SI-NEXT:    v_not_b32_e32 v8, v1
+; SI-NEXT:    v_mov_b32_e32 v0, v8
+; SI-NEXT:    v_mov_b32_e32 v1, v9
+; SI-NEXT:    v_mov_b32_e32 v2, v10
+; SI-NEXT:    v_mov_b32_e32 v3, v11
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB35_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i64_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v4, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB35_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v9, v1
+; VI-NEXT:    v_mov_b32_e32 v8, v0
+; VI-NEXT:    v_and_b32_e32 v0, v9, v3
+; VI-NEXT:    v_and_b32_e32 v1, v8, v2
+; VI-NEXT:    v_not_b32_e32 v7, v0
+; VI-NEXT:    v_not_b32_e32 v6, v1
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB35_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i64_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off offset:32
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB35_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    v_and_b32_e32 v4, v7, v3
+; GFX9-NEXT:    v_and_b32_e32 v8, v6, v2
+; GFX9-NEXT:    v_not_b32_e32 v5, v4
+; GFX9-NEXT:    v_not_b32_e32 v4, v8
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB35_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-NEXT:    v_mov_b32_e32 v1, v5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw nand ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @global_atomic_nand_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_nand_i64_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB36_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, s35, v4
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v5, s34, v3
+; SI-NEXT:    v_not_b32_e32 v2, v1
+; SI-NEXT:    v_not_b32_e32 v1, v5
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[5:8], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB36_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i64_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB36_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v0, s7, v3
+; VI-NEXT:    v_and_b32_e32 v6, s6, v2
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    v_not_b32_e32 v1, v0
+; VI-NEXT:    v_not_b32_e32 v0, v6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB36_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i64_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB36_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v0, s7, v3
+; GFX9-NEXT:    v_and_b32_e32 v5, s6, v2
+; GFX9-NEXT:    v_not_b32_e32 v1, v0
+; GFX9-NEXT:    v_not_b32_e32 v0, v5
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB36_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw nand ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_nand_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_nand_i64_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB37_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, s35, v4
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v5, s34, v3
+; SI-NEXT:    v_not_b32_e32 v2, v1
+; SI-NEXT:    v_not_b32_e32 v1, v5
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[5:8], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB37_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i64_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB37_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v0, s7, v3
+; VI-NEXT:    v_and_b32_e32 v6, s6, v2
+; VI-NEXT:    v_mov_b32_e32 v4, s34
+; VI-NEXT:    v_mov_b32_e32 v5, s35
+; VI-NEXT:    v_not_b32_e32 v1, v0
+; VI-NEXT:    v_not_b32_e32 v0, v6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB37_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i64_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[4:5] offset:32
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB37_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v0, s7, v3
+; GFX9-NEXT:    v_and_b32_e32 v5, s6, v2
+; GFX9-NEXT:    v_not_b32_e32 v1, v0
+; GFX9-NEXT:    v_not_b32_e32 v0, v5
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB37_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw nand ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_nand_i64_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB38_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_and_b32_e32 v0, s35, v8
+; SI-NEXT:    v_and_b32_e32 v1, s34, v7
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_not_b32_e32 v6, v0
+; SI-NEXT:    v_not_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    v_mov_b32_e32 v5, v7
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[3:6], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[7:8]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB38_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    v_mov_b32_e32 v1, v4
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i64_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB38_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_and_b32_e32 v0, s7, v3
+; VI-NEXT:    v_and_b32_e32 v6, s6, v2
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    v_not_b32_e32 v1, v0
+; VI-NEXT:    v_not_b32_e32 v0, v6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB38_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i64_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v2, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB38_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_and_b32_e32 v0, s7, v6
+; GFX9-NEXT:    v_and_b32_e32 v1, s6, v5
+; GFX9-NEXT:    v_not_b32_e32 v4, v0
+; GFX9-NEXT:    v_not_b32_e32 v3, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB38_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw nand ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_nand_i64_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB39_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_and_b32_e32 v0, s35, v8
+; SI-NEXT:    v_and_b32_e32 v1, s34, v7
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_not_b32_e32 v6, v0
+; SI-NEXT:    v_not_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    v_mov_b32_e32 v5, v7
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[3:6], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[7:8]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB39_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    v_mov_b32_e32 v1, v4
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_nand_i64_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB39_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_mov_b32_e32 v4, s34
+; VI-NEXT:    v_and_b32_e32 v0, s7, v3
+; VI-NEXT:    v_and_b32_e32 v6, s6, v2
+; VI-NEXT:    v_mov_b32_e32 v5, s35
+; VI-NEXT:    v_not_b32_e32 v1, v0
+; VI-NEXT:    v_not_b32_e32 v0, v6
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB39_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_nand_i64_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v2, s[4:5] offset:32
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB39_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_and_b32_e32 v0, s7, v6
+; GFX9-NEXT:    v_and_b32_e32 v1, s6, v5
+; GFX9-NEXT:    v_not_b32_e32 v4, v0
+; GFX9-NEXT:    v_not_b32_e32 v3, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB39_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw nand ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw or
+; ---------------------------------------------------------------------
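+; In contrast to nand above, or (and xor below) maps directly to a single
+; hardware atomic, so no expansion loop appears in these checks:
+; buffer_atomic_or_x2 on SI, flat_atomic_or_x2 on VI, and
+; global_atomic_or_x2 on GFX9, each followed by the seq_cst cache
+; writeback/invalidate sequence.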
+
+define void @global_atomic_or_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_or_i64_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or_x2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i64_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i64_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v[2:3], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw or ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_or_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_or_i64_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i64_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i64_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v[2:3], off offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw or ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @global_atomic_or_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_or_i64_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i64_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i64_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v[0:1], v[2:3], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw or ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @global_atomic_or_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_or_i64_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i64_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i64_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw or ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @global_atomic_or_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_or_i64_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    v_mov_b32_e32 v2, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or_x2 v[1:2], off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i64_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i64_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or_x2 v2, v[0:1], s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw or ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_or_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_or_i64_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    v_mov_b32_e32 v2, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or_x2 v[1:2], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i64_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i64_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or_x2 v2, v[0:1], s[4:5] offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw or ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @global_atomic_or_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_or_i64_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or_x2 v[0:1], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i64_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i64_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v2, v[0:1], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw or ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @global_atomic_or_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_or_i64_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_or_x2 v[0:1], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_or_i64_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_or_i64_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw or ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw xor
+; ---------------------------------------------------------------------
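+; As with the or tests above, xor at system scope (seq_cst) selects a single
+; native atomic (buffer_atomic_xor_x2 / flat_atomic_xor_x2 /
+; global_atomic_xor_x2) on all three targets; no cmpxchg expansion is needed.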
+
+define void @global_atomic_xor_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_xor_i64_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor_x2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i64_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i64_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor_x2 v[0:1], v[2:3], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xor ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_xor_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_xor_i64_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i64_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i64_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor_x2 v[0:1], v[2:3], off offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw xor ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @global_atomic_xor_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_xor_i64_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i64_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i64_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @global_atomic_xor_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_xor_i64_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i64_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i64_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw xor ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @global_atomic_xor_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_xor_i64_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    v_mov_b32_e32 v2, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor_x2 v[1:2], off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i64_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i64_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor_x2 v2, v[0:1], s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xor ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_xor_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_xor_i64_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    v_mov_b32_e32 v2, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor_x2 v[1:2], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i64_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i64_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor_x2 v2, v[0:1], s[4:5] offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw xor ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @global_atomic_xor_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_xor_i64_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor_x2 v[0:1], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i64_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i64_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor_x2 v[0:1], v2, v[0:1], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @global_atomic_xor_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_xor_i64_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_xor_x2 v[0:1], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_xor_i64_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_xor_i64_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_xor_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw xor ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw max
+; ---------------------------------------------------------------------
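+;
+; Unlike the bitwise tests above, the checks below show atomicrmw max being
+; expanded into a compare-select-cmpxchg retry loop (the %atomicrmw.start
+; blocks) rather than a single hardware atomic. A minimal IR-level sketch of
+; that expansion, with illustrative value and block names not taken from the
+; generated code:
+;
+;     %init = load i64, ptr addrspace(1) %ptr
+;     br label %atomicrmw.start
+;   atomicrmw.start:
+;     %loaded = phi i64 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
+;     %cmp = icmp sgt i64 %loaded, %in
+;     %new = select i1 %cmp, i64 %loaded, i64 %in
+;     %pair = cmpxchg ptr addrspace(1) %ptr, i64 %loaded, i64 %new seq_cst seq_cst
+;     %newloaded = extractvalue { i64, i1 } %pair, 0
+;     %success = extractvalue { i64, i1 } %pair, 1
+;     br i1 %success, label %atomicrmw.end, label %atomicrmw.start
+;   atomicrmw.end: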
+
+define void @global_atomic_max_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_max_i64_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB56_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v7
+; SI-NEXT:    v_mov_b32_e32 v10, v6
+; SI-NEXT:    v_mov_b32_e32 v9, v5
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    v_mov_b32_e32 v7, v9
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB56_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i64_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB56_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB56_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i64_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB56_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB56_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw max ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_max_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_max_i64_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB57_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v7
+; SI-NEXT:    v_mov_b32_e32 v10, v6
+; SI-NEXT:    v_mov_b32_e32 v9, v5
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    v_mov_b32_e32 v7, v9
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB57_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i64_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB57_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB57_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i64_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[0:1], off offset:32
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB57_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB57_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @global_atomic_max_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_max_i64_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v7, v1
+; SI-NEXT:    v_mov_b32_e32 v6, v0
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB58_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
+; SI-NEXT:    v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, v8
+; SI-NEXT:    v_mov_b32_e32 v1, v9
+; SI-NEXT:    v_mov_b32_e32 v2, v10
+; SI-NEXT:    v_mov_b32_e32 v3, v11
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB58_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i64_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[4:5], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB58_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB58_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, v4
+; VI-NEXT:    v_mov_b32_e32 v1, v5
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i64_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB58_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB58_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-NEXT:    v_mov_b32_e32 v1, v5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw max ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @global_atomic_max_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_max_i64_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v7, v1
+; SI-NEXT:    v_mov_b32_e32 v6, v0
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB59_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
+; SI-NEXT:    v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, v8
+; SI-NEXT:    v_mov_b32_e32 v1, v9
+; SI-NEXT:    v_mov_b32_e32 v2, v10
+; SI-NEXT:    v_mov_b32_e32 v3, v11
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB59_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i64_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v4, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB59_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v9, v1
+; VI-NEXT:    v_mov_b32_e32 v8, v0
+; VI-NEXT:    v_cmp_gt_i64_e32 vcc, v[8:9], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB59_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i64_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off offset:32
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB59_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB59_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-NEXT:    v_mov_b32_e32 v1, v5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @global_atomic_max_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_max_i64_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB60_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_lt_i64_e32 vcc, s[34:35], v[3:4]
+; SI-NEXT:    v_cndmask_b32_e32 v2, v1, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[5:8], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB60_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i64_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB60_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB60_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i64_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB60_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v5, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB60_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw max ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_max_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_max_i64_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB61_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_lt_i64_e32 vcc, s[34:35], v[3:4]
+; SI-NEXT:    v_cndmask_b32_e32 v2, v1, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[5:8], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB61_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i64_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB61_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s34
+; VI-NEXT:    v_mov_b32_e32 v5, s35
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB61_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i64_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[4:5] offset:32
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB61_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v5, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB61_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @global_atomic_max_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_max_i64_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB62_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_cmp_lt_i64_e32 vcc, s[34:35], v[7:8]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cndmask_b32_e32 v6, v0, v8, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    v_cndmask_b32_e32 v5, v0, v7, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    v_mov_b32_e32 v5, v7
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[3:6], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[7:8]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB62_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    v_mov_b32_e32 v1, v4
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i64_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB62_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB62_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i64_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v2, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB62_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB62_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw max ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @global_atomic_max_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_max_i64_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB63_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_cmp_lt_i64_e32 vcc, s[34:35], v[7:8]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cndmask_b32_e32 v6, v0, v8, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    v_cndmask_b32_e32 v5, v0, v7, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    v_mov_b32_e32 v5, v7
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[3:6], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[7:8]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB63_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    v_mov_b32_e32 v1, v4
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_max_i64_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB63_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s34
+; VI-NEXT:    v_mov_b32_e32 v5, s35
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB63_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_max_i64_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v2, s[4:5] offset:32
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB63_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB63_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_kernel void @atomic_max_i64_addr64_offset(ptr addrspace(1) %out, i64 %in, i64 %index) {
+; SI-LABEL: atomic_max_i64_addr64_offset:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x8
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s8
+; SI-NEXT:    v_mov_b32_e32 v3, s9
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB64_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB64_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_max_i64_addr64_offset:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT:    s_add_u32 s0, s0, s4
+; VI-NEXT:    s_addc_u32 s1, s1, s5
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x20
+; VI-NEXT:    s_add_u32 s0, s0, 32
+; VI-NEXT:    s_addc_u32 s1, s1, 0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:  .LBB64_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_mov_b32_e32 v6, s2
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB64_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_addr64_offset:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT:    s_add_u32 s0, s4, s0
+; GFX9-NEXT:    s_addc_u32 s1, s5, s1
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x20
+; GFX9-NEXT:    s_mov_b64 s[2:3], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9-NEXT:  .LBB64_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v5, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT:    s_cbranch_execnz .LBB64_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
+  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
+  %tmp0 = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
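+
+; In the kernel variants, the index*8 + 32 addressing is folded differently
+; per target, as checked above: SI keeps the 32-byte offset in the buffer
+; instruction (addr64 with offset:32), VI pre-adds it into the scalar
+; address for the flat atomic, and GFX9 folds it into the
+; global_atomic_cmpswap_x2 offset field.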
+
+define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
+; SI-LABEL: atomic_max_i64_ret_addr64_offset:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; SI-NEXT:    s_add_u32 s8, s0, s6
+; SI-NEXT:    s_addc_u32 s9, s1, s7
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x8
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:  .LBB65_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB65_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s4, s2
+; SI-NEXT:    s_mov_b32 s5, s3
+; SI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_max_i64_ret_addr64_offset:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT:    s_add_u32 s0, s0, s6
+; VI-NEXT:    s_addc_u32 s1, s1, s7
+; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x20
+; VI-NEXT:    s_add_u32 s6, s0, 32
+; VI-NEXT:    s_addc_u32 s7, s1, 0
+; VI-NEXT:    s_mov_b64 s[0:1], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:  .LBB65_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v6, s4
+; VI-NEXT:    v_mov_b32_e32 v4, s6
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT:    s_cbranch_execnz .LBB65_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_ret_addr64_offset:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT:    s_add_u32 s0, s0, s6
+; GFX9-NEXT:    s_addc_u32 s1, s1, s7
+; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x20
+; GFX9-NEXT:    s_mov_b64 s[6:7], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s8
+; GFX9-NEXT:    v_mov_b32_e32 v1, s9
+; GFX9-NEXT:  .LBB65_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    s_cbranch_execnz .LBB65_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
+  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
+  %tmp0 = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst
+  store i64 %tmp0, ptr addrspace(1) %out2
+  ret void
+}
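+
+; Editorial note on the addressing math in these addr64 tests (not matched
+; by the checks): the i64 %index is scaled to a byte offset with
+; s_lshl_b64 ..., 3 (index * 8 for an i64 element), and the constant
+; getelementptr of 4 elements (4 * 8 = 32 bytes) is folded into the memory
+; instruction where the ISA allows it: offset:32 on SI buffer and GFX9
+; global instructions, and an explicit s_add_u32/s_addc_u32 of 32 on VI,
+; whose flat instructions lack an immediate offset field. The scalar load
+; of the initial value shows the same 32 bytes as 0x8 on SI (SMRD
+; immediate offsets are in dwords) and as 0x20 on VI/GFX9 (byte offsets).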
+
+define amdgpu_kernel void @atomic_max_i64_addr64(ptr addrspace(1) %out, i64 %in, i64 %index) {
+; SI-LABEL: atomic_max_i64_addr64:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s8
+; SI-NEXT:    v_mov_b32_e32 v3, s9
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB66_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB66_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_max_i64_addr64:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT:    s_add_u32 s0, s0, s4
+; VI-NEXT:    s_addc_u32 s1, s1, s5
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:  .LBB66_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_mov_b32_e32 v6, s2
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB66_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_addr64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT:    s_add_u32 s0, s4, s0
+; GFX9-NEXT:    s_addc_u32 s1, s5, s1
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT:    s_mov_b64 s[2:3], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9-NEXT:  .LBB66_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v5, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT:    s_cbranch_execnz .LBB66_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
+  %tmp0 = atomicrmw max ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
+; SI-LABEL: atomic_max_i64_ret_addr64:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; SI-NEXT:    s_add_u32 s8, s0, s6
+; SI-NEXT:    s_addc_u32 s9, s1, s7
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x0
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:  .LBB67_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB67_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s4, s2
+; SI-NEXT:    s_mov_b32 s5, s3
+; SI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_max_i64_ret_addr64:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT:    s_add_u32 s0, s0, s6
+; VI-NEXT:    s_addc_u32 s1, s1, s7
+; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
+; VI-NEXT:    s_mov_b64 s[6:7], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:  .LBB67_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v6, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; VI-NEXT:    s_cbranch_execnz .LBB67_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[6:7]
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_ret_addr64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT:    s_add_u32 s0, s0, s6
+; GFX9-NEXT:    s_addc_u32 s1, s1, s7
+; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX9-NEXT:    s_mov_b64 s[6:7], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s8
+; GFX9-NEXT:    v_mov_b32_e32 v1, s9
+; GFX9-NEXT:  .LBB67_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    s_cbranch_execnz .LBB67_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
+  %tmp0 = atomicrmw max ptr addrspace(1) %ptr, i64 %in seq_cst
+  store i64 %tmp0, ptr addrspace(1) %out2
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw umax
+; ---------------------------------------------------------------------
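+;
+; These tests mirror the signed max tests above; the only difference in
+; the generated loops is the unsigned compare (v_cmp_gt_u64/v_cmp_lt_u64
+; rather than v_cmp_lt_i64). Because these are system-scope seq_cst
+; operations, the rmw is not selected to the hardware min/max atomics but
+; is expanded by the AtomicExpand pass into a compare-and-swap loop (the
+; .LBB*_1 blocks labeled %atomicrmw.start). A rough sketch of the IR the
+; expansion produces for atomicrmw umax, with illustrative value names
+; that the checks do not match:
+;
+;     %init = load i64, ptr addrspace(1) %ptr
+;     br label %atomicrmw.start
+;   atomicrmw.start:
+;     %loaded = phi i64 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
+;     %cmp = icmp ugt i64 %loaded, %in
+;     %new = select i1 %cmp, i64 %loaded, i64 %in
+;     %pair = cmpxchg ptr addrspace(1) %ptr, i64 %loaded, i64 %new seq_cst seq_cst
+;     %newloaded = extractvalue { i64, i1 } %pair, 0
+;     %success = extractvalue { i64, i1 } %pair, 1
+;     br i1 %success, label %atomicrmw.end, label %atomicrmw.start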
+
+define void @global_atomic_umax_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_umax_i64_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB68_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v7
+; SI-NEXT:    v_mov_b32_e32 v10, v6
+; SI-NEXT:    v_mov_b32_e32 v9, v5
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    v_mov_b32_e32 v7, v9
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB68_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i64_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB68_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB68_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i64_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB68_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB68_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umax ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
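+
+; Editorial note on the loop structure above: after each cmpswap,
+; v_cmp_eq_u64 sets a vcc bit for every lane whose exchange succeeded
+; (the value returned matched the value that lane expected), s_or_b64
+; accumulates those bits into a "done" mask, and s_andn2_b64 removes the
+; finished lanes from exec so the loop only re-executes for lanes whose
+; exchange was beaten by another write. The trailing
+; s_or_b64 exec, exec, ... restores the original exec mask.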
+
+define void @global_atomic_umax_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_umax_i64_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB69_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v7
+; SI-NEXT:    v_mov_b32_e32 v10, v6
+; SI-NEXT:    v_mov_b32_e32 v9, v5
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    v_mov_b32_e32 v7, v9
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB69_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i64_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB69_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB69_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i64_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[0:1], off offset:32
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB69_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB69_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
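+
+; Editorial note: the 32-byte offset from the getelementptr is handled
+; differently per target. SI folds it into the MUBUF instruction
+; (offset:32) on an addr64-mode access through a synthesized resource
+; descriptor (base 0, s7 = 0xf000), VI adds it to the pointer up front
+; with v_add_u32/v_addc_u32 because VI flat instructions have no offset
+; field, and GFX9 uses the global_* immediate offset:32.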
+
+define i64 @global_atomic_umax_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_umax_i64_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v7, v1
+; SI-NEXT:    v_mov_b32_e32 v6, v0
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB70_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
+; SI-NEXT:    v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, v8
+; SI-NEXT:    v_mov_b32_e32 v1, v9
+; SI-NEXT:    v_mov_b32_e32 v2, v10
+; SI-NEXT:    v_mov_b32_e32 v3, v11
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB70_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i64_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[4:5], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB70_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB70_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, v4
+; VI-NEXT:    v_mov_b32_e32 v1, v5
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i64_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB70_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB70_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-NEXT:    v_mov_b32_e32 v1, v5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umax ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @global_atomic_umax_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_umax_i64_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v7, v1
+; SI-NEXT:    v_mov_b32_e32 v6, v0
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB71_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
+; SI-NEXT:    v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, v8
+; SI-NEXT:    v_mov_b32_e32 v1, v9
+; SI-NEXT:    v_mov_b32_e32 v2, v10
+; SI-NEXT:    v_mov_b32_e32 v3, v11
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB71_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i64_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v4, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB71_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v9, v1
+; VI-NEXT:    v_mov_b32_e32 v8, v0
+; VI-NEXT:    v_cmp_gt_u64_e32 vcc, v[8:9], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB71_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i64_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off offset:32
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB71_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB71_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-NEXT:    v_mov_b32_e32 v1, v5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @global_atomic_umax_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_umax_i64_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB72_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[34:35], v[3:4]
+; SI-NEXT:    v_cndmask_b32_e32 v2, v1, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[5:8], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB72_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i64_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB72_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB72_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i64_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB72_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v5, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB72_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umax ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
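+
+; Editorial note on the _scalar variants: the pointer and value arrive in
+; SGPRs (inreg). On SI, s[6:7] are evidently callee-saved here and are
+; also repurposed for the buffer resource descriptor, so the incoming
+; pair is parked in lanes of a VGPR with v_writelane_b32 and recovered
+; with v_readlane_b32; that VGPR is itself spilled and reloaded around
+; the function body (the s_xor_saveexec_b64/buffer_store_dword pairs) so
+; its original contents survive.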
+
+define amdgpu_gfx void @global_atomic_umax_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_umax_i64_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB73_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[34:35], v[3:4]
+; SI-NEXT:    v_cndmask_b32_e32 v2, v1, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[5:8], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB73_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i64_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB73_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s34
+; VI-NEXT:    v_mov_b32_e32 v5, s35
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB73_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i64_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[4:5] offset:32
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB73_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v5, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB73_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @global_atomic_umax_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_umax_i64_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB74_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[34:35], v[7:8]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cndmask_b32_e32 v6, v0, v8, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    v_cndmask_b32_e32 v5, v0, v7, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    v_mov_b32_e32 v5, v7
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[3:6], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[7:8]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB74_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    v_mov_b32_e32 v1, v4
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i64_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB74_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB74_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i64_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v2, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB74_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB74_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umax ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @global_atomic_umax_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_umax_i64_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB75_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[34:35], v[7:8]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cndmask_b32_e32 v6, v0, v8, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    v_cndmask_b32_e32 v5, v0, v7, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    v_mov_b32_e32 v5, v7
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[3:6], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[7:8]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB75_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    v_mov_b32_e32 v1, v4
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umax_i64_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB75_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s34
+; VI-NEXT:    v_mov_b32_e32 v5, s35
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB75_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umax_i64_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v2, s[4:5] offset:32
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB75_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB75_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_kernel void @atomic_umax_i64_addr64_offset(ptr addrspace(1) %out, i64 %in, i64 %index) {
+; SI-LABEL: atomic_umax_i64_addr64_offset:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x8
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s8
+; SI-NEXT:    v_mov_b32_e32 v3, s9
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB76_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB76_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_addr64_offset:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT:    s_add_u32 s0, s0, s4
+; VI-NEXT:    s_addc_u32 s1, s1, s5
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x20
+; VI-NEXT:    s_add_u32 s0, s0, 32
+; VI-NEXT:    s_addc_u32 s1, s1, 0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:  .LBB76_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_mov_b32_e32 v6, s2
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB76_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_addr64_offset:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT:    s_add_u32 s0, s4, s0
+; GFX9-NEXT:    s_addc_u32 s1, s5, s1
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x20
+; GFX9-NEXT:    s_mov_b64 s[2:3], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9-NEXT:  .LBB76_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v5, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT:    s_cbranch_execnz .LBB76_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
+  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
+  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
+; SI-LABEL: atomic_umax_i64_ret_addr64_offset:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; SI-NEXT:    s_add_u32 s8, s0, s6
+; SI-NEXT:    s_addc_u32 s9, s1, s7
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x8
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:  .LBB77_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB77_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s4, s2
+; SI-NEXT:    s_mov_b32 s5, s3
+; SI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_ret_addr64_offset:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT:    s_add_u32 s0, s0, s6
+; VI-NEXT:    s_addc_u32 s1, s1, s7
+; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x20
+; VI-NEXT:    s_add_u32 s6, s0, 32
+; VI-NEXT:    s_addc_u32 s7, s1, 0
+; VI-NEXT:    s_mov_b64 s[0:1], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:  .LBB77_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v6, s4
+; VI-NEXT:    v_mov_b32_e32 v4, s6
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT:    s_cbranch_execnz .LBB77_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_ret_addr64_offset:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT:    s_add_u32 s0, s0, s6
+; GFX9-NEXT:    s_addc_u32 s1, s1, s7
+; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x20
+; GFX9-NEXT:    s_mov_b64 s[6:7], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s8
+; GFX9-NEXT:    v_mov_b32_e32 v1, s9
+; GFX9-NEXT:  .LBB77_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    s_cbranch_execnz .LBB77_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
+  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
+  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst
+  store i64 %tmp0, ptr addrspace(1) %out2
+  ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
+; SI-LABEL: atomic_umax_i64_ret_addr64:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; SI-NEXT:    s_add_u32 s8, s0, s6
+; SI-NEXT:    s_addc_u32 s9, s1, s7
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x0
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:  .LBB78_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB78_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s4, s2
+; SI-NEXT:    s_mov_b32 s5, s3
+; SI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_ret_addr64:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT:    s_add_u32 s0, s0, s6
+; VI-NEXT:    s_addc_u32 s1, s1, s7
+; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
+; VI-NEXT:    s_mov_b64 s[6:7], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:  .LBB78_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v6, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; VI-NEXT:    s_cbranch_execnz .LBB78_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[6:7]
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_ret_addr64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT:    s_add_u32 s0, s0, s6
+; GFX9-NEXT:    s_addc_u32 s1, s1, s7
+; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX9-NEXT:    s_mov_b64 s[6:7], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s8
+; GFX9-NEXT:    v_mov_b32_e32 v1, s9
+; GFX9-NEXT:  .LBB78_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    s_cbranch_execnz .LBB78_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
+  %tmp0 = atomicrmw umax ptr addrspace(1) %ptr, i64 %in seq_cst
+  store i64 %tmp0, ptr addrspace(1) %out2
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw umin
+; ---------------------------------------------------------------------
+
+define void @global_atomic_umin_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_umin_i64_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB79_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v7
+; SI-NEXT:    v_mov_b32_e32 v10, v6
+; SI-NEXT:    v_mov_b32_e32 v9, v5
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    v_mov_b32_e32 v7, v9
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB79_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i64_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB79_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB79_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i64_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB79_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB79_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umin ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_umin_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_umin_i64_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB80_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v7
+; SI-NEXT:    v_mov_b32_e32 v10, v6
+; SI-NEXT:    v_mov_b32_e32 v9, v5
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    v_mov_b32_e32 v7, v9
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB80_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i64_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB80_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB80_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i64_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[0:1], off offset:32
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB80_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB80_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw umin ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @global_atomic_umin_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_umin_i64_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v7, v1
+; SI-NEXT:    v_mov_b32_e32 v6, v0
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB81_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
+; SI-NEXT:    v_cmp_le_u64_e32 vcc, v[10:11], v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, v8
+; SI-NEXT:    v_mov_b32_e32 v1, v9
+; SI-NEXT:    v_mov_b32_e32 v2, v10
+; SI-NEXT:    v_mov_b32_e32 v3, v11
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB81_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i64_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[4:5], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB81_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB81_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, v4
+; VI-NEXT:    v_mov_b32_e32 v1, v5
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i64_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB81_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB81_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-NEXT:    v_mov_b32_e32 v1, v5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umin ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @global_atomic_umin_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_umin_i64_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v7, v1
+; SI-NEXT:    v_mov_b32_e32 v6, v0
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB82_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
+; SI-NEXT:    v_cmp_le_u64_e32 vcc, v[10:11], v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, v8
+; SI-NEXT:    v_mov_b32_e32 v1, v9
+; SI-NEXT:    v_mov_b32_e32 v2, v10
+; SI-NEXT:    v_mov_b32_e32 v3, v11
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB82_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i64_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v4, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB82_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v9, v1
+; VI-NEXT:    v_mov_b32_e32 v8, v0
+; VI-NEXT:    v_cmp_le_u64_e32 vcc, v[8:9], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB82_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i64_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off offset:32
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB82_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB82_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-NEXT:    v_mov_b32_e32 v1, v5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw umin ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @global_atomic_umin_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_umin_i64_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB83_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_ge_u64_e32 vcc, s[34:35], v[3:4]
+; SI-NEXT:    v_cndmask_b32_e32 v2, v1, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[5:8], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB83_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i64_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB83_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB83_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i64_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB83_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v5, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB83_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw umin ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_umin_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_umin_i64_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB84_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_ge_u64_e32 vcc, s[34:35], v[3:4]
+; SI-NEXT:    v_cndmask_b32_e32 v2, v1, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[5:8], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB84_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i64_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB84_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s34
+; VI-NEXT:    v_mov_b32_e32 v5, s35
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB84_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i64_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[4:5] offset:32
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB84_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v5, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB84_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw umin ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @global_atomic_umin_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_umin_i64_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB85_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_cmp_ge_u64_e32 vcc, s[34:35], v[7:8]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cndmask_b32_e32 v6, v0, v8, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    v_cndmask_b32_e32 v5, v0, v7, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    v_mov_b32_e32 v5, v7
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[3:6], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[7:8]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB85_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    v_mov_b32_e32 v1, v4
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i64_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB85_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB85_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i64_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v2, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB85_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB85_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umin ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @global_atomic_umin_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_umin_i64_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB86_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_cmp_ge_u64_e32 vcc, s[34:35], v[7:8]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cndmask_b32_e32 v6, v0, v8, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    v_cndmask_b32_e32 v5, v0, v7, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    v_mov_b32_e32 v5, v7
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[3:6], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[7:8]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB86_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    v_mov_b32_e32 v1, v4
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_umin_i64_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB86_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s34
+; VI-NEXT:    v_mov_b32_e32 v5, s35
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB86_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_umin_i64_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v2, s[4:5] offset:32
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB86_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_ge_u64_e32 vcc, s[6:7], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB86_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw umin ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw min
+; ---------------------------------------------------------------------
+
+define void @global_atomic_min_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_min_i64_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB87_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v7
+; SI-NEXT:    v_mov_b32_e32 v10, v6
+; SI-NEXT:    v_mov_b32_e32 v9, v5
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    v_mov_b32_e32 v7, v9
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB87_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i64_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB87_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB87_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i64_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB87_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB87_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw min ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_min_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_min_i64_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB88_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v7
+; SI-NEXT:    v_mov_b32_e32 v10, v6
+; SI-NEXT:    v_mov_b32_e32 v9, v5
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    v_mov_b32_e32 v7, v9
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB88_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i64_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB88_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB88_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i64_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[0:1], off offset:32
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB88_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB88_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @global_atomic_min_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_min_i64_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v7, v1
+; SI-NEXT:    v_mov_b32_e32 v6, v0
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB89_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
+; SI-NEXT:    v_cmp_le_i64_e32 vcc, v[10:11], v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, v8
+; SI-NEXT:    v_mov_b32_e32 v1, v9
+; SI-NEXT:    v_mov_b32_e32 v2, v10
+; SI-NEXT:    v_mov_b32_e32 v3, v11
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB89_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i64_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[4:5], v[0:1]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB89_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v7, v5
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB89_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, v4
+; VI-NEXT:    v_mov_b32_e32 v1, v5
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i64_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB89_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB89_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-NEXT:    v_mov_b32_e32 v1, v5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw min ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @global_atomic_min_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_min_i64_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, v3
+; SI-NEXT:    v_mov_b32_e32 v4, v2
+; SI-NEXT:    v_mov_b32_e32 v7, v1
+; SI-NEXT:    v_mov_b32_e32 v6, v0
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_mov_b64 s[8:9], 0
+; SI-NEXT:  .LBB90_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
+; SI-NEXT:    v_cmp_le_i64_e32 vcc, v[10:11], v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, v8
+; SI-NEXT:    v_mov_b32_e32 v1, v9
+; SI-NEXT:    v_mov_b32_e32 v2, v10
+; SI-NEXT:    v_mov_b32_e32 v3, v11
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_cbranch_execnz .LBB90_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i64_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v4, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB90_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v9, v1
+; VI-NEXT:    v_mov_b32_e32 v8, v0
+; VI-NEXT:    v_cmp_le_i64_e32 vcc, v[8:9], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB90_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i64_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off offset:32
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB90_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v7, v5
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB90_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-NEXT:    v_mov_b32_e32 v1, v5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
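+; In the amdgpu_gfx "scalar" variants the pointer and value are passed in
+; SGPRs (inreg). On SI, s[6:7] is reused as the high half of the buffer
+; resource descriptor inside the loop, so the pair is parked in VGPR lanes
+; with v_writelane_b32 (after spilling that VGPR to scratch) and restored
+; with v_readlane_b32 before the return.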
+define amdgpu_gfx void @global_atomic_min_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_min_i64_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB91_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_ge_i64_e32 vcc, s[34:35], v[3:4]
+; SI-NEXT:    v_cndmask_b32_e32 v2, v1, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[5:8], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB91_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i64_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB91_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB91_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i64_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB91_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v5, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB91_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw min ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_min_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_min_i64_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB92_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_ge_i64_e32 vcc, s[34:35], v[3:4]
+; SI-NEXT:    v_cndmask_b32_e32 v2, v1, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[5:8], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB92_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i64_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB92_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s34
+; VI-NEXT:    v_mov_b32_e32 v5, s35
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB92_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i64_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[4:5] offset:32
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB92_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v5, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB92_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @global_atomic_min_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_min_i64_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB93_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_cmp_ge_i64_e32 vcc, s[34:35], v[7:8]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cndmask_b32_e32 v6, v0, v8, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    v_cndmask_b32_e32 v5, v0, v7, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    v_mov_b32_e32 v5, v7
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[3:6], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[7:8]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB93_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    v_mov_b32_e32 v1, v4
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i64_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_mov_b64 s[34:35], 0
+; VI-NEXT:  .LBB93_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_cbranch_execnz .LBB93_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i64_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v2, s[4:5]
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB93_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB93_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw min ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @global_atomic_min_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_min_i64_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s35, s7
+; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_mov_b64 s[36:37], 0
+; SI-NEXT:  .LBB94_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v8, v4
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_cmp_ge_i64_e32 vcc, s[34:35], v[7:8]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cndmask_b32_e32 v6, v0, v8, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s34
+; SI-NEXT:    v_cndmask_b32_e32 v5, v0, v7, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    v_mov_b32_e32 v4, v6
+; SI-NEXT:    v_mov_b32_e32 v5, v7
+; SI-NEXT:    v_mov_b32_e32 v6, v8
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[3:6], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[7:8]
+; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; SI-NEXT:    s_cbranch_execnz .LBB94_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v0, v3
+; SI-NEXT:    v_mov_b32_e32 v1, v4
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_min_i64_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s34
+; VI-NEXT:    v_mov_b32_e32 v1, s35
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_mov_b64 s[36:37], 0
+; VI-NEXT:  .LBB94_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s34
+; VI-NEXT:    v_mov_b32_e32 v5, s35
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_cbranch_execnz .LBB94_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[36:37]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_min_i64_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v2, s[4:5] offset:32
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
+; GFX9-NEXT:  .LBB94_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_cbranch_execnz .LBB94_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
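+; The system-scope (seq_cst) i64 min is not selected to a single hardware
+; atomic here; it is expanded into a compare-exchange loop. Each iteration
+; computes min(loaded, %in) with v_cmp_le/ge_i64 + v_cndmask_b32, attempts
+; *_atomic_cmpswap_x2, and retires the lanes whose swap succeeded via the
+; s_or_b64 / s_andn2_b64 exec-mask updates. A minimal, illustrative sketch
+; of the IR form such an expansion takes (not checked by this test):
+;
+;   %init = load i64, ptr addrspace(1) %ptr
+;   br label %atomicrmw.start
+; atomicrmw.start:
+;   %loaded = phi i64 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
+;   %cmp = icmp sle i64 %loaded, %in
+;   %new = select i1 %cmp, i64 %loaded, i64 %in
+;   %pair = cmpxchg ptr addrspace(1) %ptr, i64 %loaded, i64 %new seq_cst seq_cst
+;   %newloaded = extractvalue { i64, i1 } %pair, 0
+;   %success = extractvalue { i64, i1 } %pair, 1
+;   br i1 %success, label %atomicrmw.end, label %atomicrmw.start
+; atomicrmw.end: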
+define amdgpu_kernel void @atomic_min_i64_addr64_offset(ptr addrspace(1) %out, i64 %in, i64 %index) {
+; SI-LABEL: atomic_min_i64_addr64_offset:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; SI-NEXT:    s_add_u32 s4, s0, s4
+; SI-NEXT:    s_addc_u32 s5, s1, s5
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x8
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s8
+; SI-NEXT:    v_mov_b32_e32 v3, s9
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:  .LBB95_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB95_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_min_i64_addr64_offset:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT:    s_add_u32 s0, s0, s4
+; VI-NEXT:    s_addc_u32 s1, s1, s5
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x20
+; VI-NEXT:    s_add_u32 s0, s0, 32
+; VI-NEXT:    s_addc_u32 s1, s1, 0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:  .LBB95_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_mov_b32_e32 v6, s2
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB95_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_min_i64_addr64_offset:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT:    s_add_u32 s0, s4, s0
+; GFX9-NEXT:    s_addc_u32 s1, s5, s1
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x20
+; GFX9-NEXT:    s_mov_b64 s[2:3], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9-NEXT:  .LBB95_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s7
+; GFX9-NEXT:    v_mov_b32_e32 v5, s6
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT:    s_cbranch_execnz .LBB95_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
+  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
+  %tmp0 = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
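+; The amdgpu_kernel addr64 variants scale %index by 8 (s_lshl_b64 by 3)
+; and add it to the base with s_add_u32/s_addc_u32 before running the same
+; loop; the _ret forms store the returned old value to %out2 afterwards.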
+define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
+; SI-LABEL: atomic_min_i64_ret_addr64_offset:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; SI-NEXT:    s_add_u32 s8, s0, s6
+; SI-NEXT:    s_addc_u32 s9, s1, s7
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x8
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:  .LBB96_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB96_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s4, s2
+; SI-NEXT:    s_mov_b32 s5, s3
+; SI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_min_i64_ret_addr64_offset:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT:    s_add_u32 s0, s0, s6
+; VI-NEXT:    s_addc_u32 s1, s1, s7
+; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x20
+; VI-NEXT:    s_add_u32 s6, s0, 32
+; VI-NEXT:    s_addc_u32 s7, s1, 0
+; VI-NEXT:    s_mov_b64 s[0:1], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:  .LBB96_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v6, s4
+; VI-NEXT:    v_mov_b32_e32 v4, s6
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT:    s_cbranch_execnz .LBB96_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_min_i64_ret_addr64_offset:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT:    s_add_u32 s0, s0, s6
+; GFX9-NEXT:    s_addc_u32 s1, s1, s7
+; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x20
+; GFX9-NEXT:    s_mov_b64 s[6:7], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s8
+; GFX9-NEXT:    v_mov_b32_e32 v1, s9
+; GFX9-NEXT:  .LBB96_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    s_cbranch_execnz .LBB96_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
+  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
+  %tmp0 = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst
+  store i64 %tmp0, ptr addrspace(1) %out2
+  ret void
+}
+
+define amdgpu_kernel void @atomic_min_i64(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: atomic_min_i64:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
+; SI-NEXT:    s_mov_b64 s[4:5], 0
+; SI-NEXT:    s_mov_b32 s6, s2
+; SI-NEXT:    s_mov_b32 s7, s3
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s8
+; SI-NEXT:    v_mov_b32_e32 v3, s9
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:  .LBB97_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v0, s7
+; SI-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT:    s_cbranch_execnz .LBB97_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_min_i64:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:  .LBB97_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_mov_b32_e32 v6, s2
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB97_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_min_i64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, s6
+; GFX9-NEXT:    v_mov_b32_e32 v3, s7
+; GFX9-NEXT:  .LBB97_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-NEXT:    v_mov_b32_e32 v5, s2
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB97_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_endpgm
+entry:
+  %tmp0 = atomicrmw min ptr addrspace(1) %out, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
+; SI-LABEL: atomic_min_i64_ret_addr64:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; SI-NEXT:    s_add_u32 s8, s0, s6
+; SI-NEXT:    s_addc_u32 s9, s1, s7
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x0
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:  .LBB98_1: ; %atomicrmw.start
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, v3
+; SI-NEXT:    v_mov_b32_e32 v6, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
+; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_cbranch_execnz .LBB98_1
+; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; SI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s4, s2
+; SI-NEXT:    s_mov_b32 s5, s3
+; SI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: atomic_min_i64_ret_addr64:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT:    s_add_u32 s0, s0, s6
+; VI-NEXT:    s_addc_u32 s1, s1, s7
+; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
+; VI-NEXT:    s_mov_b64 s[6:7], 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:  .LBB98_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    v_mov_b32_e32 v3, v1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v6, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; VI-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; VI-NEXT:    s_cbranch_execnz .LBB98_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[6:7]
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: atomic_min_i64_ret_addr64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT:    s_add_u32 s0, s0, s6
+; GFX9-NEXT:    s_addc_u32 s1, s1, s7
+; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX9-NEXT:    s_mov_b64 s[6:7], 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s8
+; GFX9-NEXT:    v_mov_b32_e32 v1, s9
+; GFX9-NEXT:  .LBB98_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    s_cbranch_execnz .LBB98_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT:    s_endpgm
+entry:
+  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
+  %tmp0 = atomicrmw min ptr addrspace(1) %ptr, i64 %in seq_cst
+  store i64 %tmp0, ptr addrspace(1) %out2
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw uinc_wrap
+; ---------------------------------------------------------------------
+
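+; Unlike min above, system-scope uinc_wrap is selected directly to the
+; hardware inc atomics (buffer/flat/global_atomic_inc_x2) with no
+; compare-exchange loop. Per the LangRef semantics this computes
+; (old u>= %in) ? 0 : old + 1.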
+define void @global_atomic_uinc_wrap_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i64_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc_x2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i64_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i64_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc_x2 v[0:1], v[2:3], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_uinc_wrap_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i64_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i64_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i64_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc_x2 v[0:1], v[2:3], off offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @global_atomic_uinc_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i64_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i64_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i64_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc_x2 v[0:1], v[0:1], v[2:3], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @global_atomic_uinc_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i64_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i64_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i64_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx void @global_atomic_uinc_wrap_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i64_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    v_mov_b32_e32 v2, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc_x2 v[1:2], off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i64_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i64_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc_x2 v2, v[0:1], s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_uinc_wrap_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i64_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    v_mov_b32_e32 v2, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc_x2 v[1:2], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i64_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i64_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc_x2 v2, v[0:1], s[4:5] offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i64_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc_x2 v[0:1], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i64_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i64_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc_x2 v[0:1], v2, v[0:1], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_uinc_wrap_i64_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_inc_x2 v[0:1], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_uinc_wrap_i64_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_uinc_wrap_i64_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_inc_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
+; ---------------------------------------------------------------------
+; atomicrmw udec_wrap
+; ---------------------------------------------------------------------
+
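+; udec_wrap likewise maps straight onto the hardware dec atomics
+; (buffer/flat/global_atomic_dec_x2); per the LangRef it computes
+; (old == 0 || old u> %in) ? %in : old - 1.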
+define void @global_atomic_udec_wrap_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_udec_wrap_i64_noret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec_x2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i64_noret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i64_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec_x2 v[0:1], v[2:3], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define void @global_atomic_udec_wrap_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_udec_wrap_i64_noret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i64_noret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i64_noret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec_x2 v[0:1], v[2:3], off offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define i64 @global_atomic_udec_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
+; SI-LABEL: global_atomic_udec_wrap_i64_ret:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i64_ret:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i64_ret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec_x2 v[0:1], v[0:1], v[2:3], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define i64 @global_atomic_udec_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
+; SI-LABEL: global_atomic_udec_wrap_i64_ret_offset:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s6
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i64_ret_offset:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i64_ret_offset:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}
+
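+; (Reference note, not autogenerated: the _scalar variants pass the pointer and
+; operand inreg, so they arrive in SGPRs. On SI the operand is copied to VGPRs
+; for the buffer atomic, and s6/s7 are preserved across their reuse as the high
+; half of the s[4:7] buffer resource descriptor via v_writelane_b32 /
+; v_readlane_b32 into a scratch-spilled VGPR.)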
+define amdgpu_gfx void @global_atomic_udec_wrap_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_udec_wrap_i64_noret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s35
+; SI-NEXT:    v_mov_b32_e32 v2, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec_x2 v[1:2], off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i64_noret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i64_noret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec_x2 v2, v[0:1], s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx void @global_atomic_udec_wrap_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_udec_wrap_i64_noret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v0, s6, 0
+; SI-NEXT:    v_writelane_b32 v0, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    v_mov_b32_e32 v2, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec_x2 v[1:2], off, s[4:7], 0 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v0, 1
+; SI-NEXT:    v_readlane_b32 s6, v0, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i64_noret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec_x2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i64_noret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec_x2 v2, v[0:1], s[4:5] offset:32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %tmp0 = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
+; SI-LABEL: global_atomic_udec_wrap_i64_ret_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s35
+; SI-NEXT:    v_mov_b32_e32 v1, s34
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec_x2 v[0:1], off, s[4:7], 0 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i64_ret_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i64_ret_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec_x2 v[0:1], v2, v[0:1], s[4:5] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
+  ret i64 %result
+}
+
+define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
+; SI-LABEL: global_atomic_udec_wrap_i64_ret_offset_scalar:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_writelane_b32 v2, s6, 0
+; SI-NEXT:    v_writelane_b32 v2, s7, 1
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_atomic_dec_x2 v[0:1], off, s[4:7], 0 offset:32 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_wbinvl1
+; SI-NEXT:    v_readlane_b32 s7, v2, 1
+; SI-NEXT:    v_readlane_b32 s6, v2, 0
+; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    s_mov_b64 exec, s[34:35]
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: global_atomic_udec_wrap_i64_ret_offset_scalar:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s34, s4, 32
+; VI-NEXT:    s_addc_u32 s35, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s34
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s35
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_wbinvl1_vol
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_atomic_udec_wrap_i64_ret_offset_scalar:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_atomic_dec_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
+  %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 %in seq_cst
+  ret i64 %result
+}

diff  --git a/llvm/test/CodeGen/AMDGPU/global_atomics_min_max_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_min_max_system.ll
deleted file mode 100644
index 8ef22aba69f0c3..00000000000000
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_min_max_system.ll
+++ /dev/null
@@ -1,3312 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -march=amdgcn -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-atomic-optimizer-strategy=None -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s
-
-define amdgpu_kernel void @atomic_max_i32_offset(ptr addrspace(1) %out, i32 %in) {
-; SI-LABEL: atomic_max_i32_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s3, s[4:5], 0x4
-; SI-NEXT:    s_mov_b64 s[0:1], 0
-; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:  .LBB0_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_i32_e32 v0, s2, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; SI-NEXT:    s_cbranch_execnz .LBB0_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i32_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s6, s[0:1], 0x2c
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[8:9], 0x10
-; VI-NEXT:    s_add_u32 s0, s8, 16
-; VI-NEXT:    s_addc_u32 s1, s9, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB0_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_i32_e32 v0, s6, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB0_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i32_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s5, s[2:3], 0x10
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB0_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_max_i32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB0_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
-  %val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
-; SI-LABEL: atomic_max_i32_ret_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s10, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s2, s[4:5], 0x4
-; SI-NEXT:    s_mov_b64 s[8:9], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB1_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_i32_e32 v0, s10, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_cbranch_execnz .LBB1_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i32_ret_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s10, s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[8:9], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[4:5], 0x10
-; VI-NEXT:    s_add_u32 s0, s4, 16
-; VI-NEXT:    s_addc_u32 s1, s5, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB1_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_i32_e32 v0, s10, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_cbranch_execnz .LBB1_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i32_ret_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s3, s[4:5], 0x10
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:  .LBB1_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_max_i32_e32 v2, s2, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB1_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
-  %val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
-; SI-LABEL: atomic_max_i32_addr64_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s6, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x4
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB2_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_i32_e32 v0, s6, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB2_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i32_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s6, s[0:1], 0x2c
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x10
-; VI-NEXT:    s_add_u32 s0, s0, 16
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_addc_u32 s1, s1, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB2_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_i32_e32 v0, s6, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB2_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i32_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s6, s0
-; GFX9-NEXT:    s_addc_u32 s1, s7, s1
-; GFX9-NEXT:    s_load_dword s5, s[0:1], 0x10
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB2_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_max_i32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB2_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
-  %val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
-; SI-LABEL: atomic_max_i32_ret_addr64_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s8, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x4
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB3_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_i32_e32 v0, s8, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB3_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i32_ret_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s8, s[0:1], 0x34
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x10
-; VI-NEXT:    s_add_u32 s0, s0, 16
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_addc_u32 s1, s1, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB3_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_i32_e32 v0, s8, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB3_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i32_ret_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x10
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-NEXT:  .LBB3_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_max_i32_e32 v2, s8, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB3_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
-  %val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32(ptr addrspace(1) %out, i32 %in) {
-; SI-LABEL: atomic_max_i32:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s3, s[4:5], 0x0
-; SI-NEXT:    s_mov_b64 s[0:1], 0
-; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:  .LBB4_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_i32_e32 v0, s2, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; SI-NEXT:    s_cbranch_execnz .LBB4_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i32:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s3, s[4:5], 0x0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:  .LBB4_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_i32_e32 v0, s2, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB4_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i32:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s5, s[2:3], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB4_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_max_i32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB4_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile max ptr addrspace(1) %out, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
-; SI-LABEL: atomic_max_i32_ret:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s10, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s2, s[4:5], 0x0
-; SI-NEXT:    s_mov_b64 s[8:9], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB5_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_i32_e32 v0, s10, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_cbranch_execnz .LBB5_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i32_ret:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s10, s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[8:9], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[4:5], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB5_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_i32_e32 v0, s10, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_cbranch_execnz .LBB5_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i32_ret:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s3, s[4:5], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:  .LBB5_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_max_i32_e32 v2, s2, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB5_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile max ptr addrspace(1) %out, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
-; SI-LABEL: atomic_max_i32_addr64:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s6, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB6_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_i32_e32 v0, s6, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB6_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i32_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s6, s[0:1], 0x2c
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB6_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_i32_e32 v0, s6, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB6_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i32_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s6, s0
-; GFX9-NEXT:    s_addc_u32 s1, s7, s1
-; GFX9-NEXT:    s_load_dword s5, s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB6_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_max_i32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB6_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %val = atomicrmw volatile max ptr addrspace(1) %ptr, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_max_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
-; SI-LABEL: atomic_max_i32_ret_addr64:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s8, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB7_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_i32_e32 v0, s8, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB7_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_max_i32_ret_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s8, s[0:1], 0x34
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB7_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_i32_e32 v0, s8, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB7_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_max_i32_ret_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-NEXT:  .LBB7_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_max_i32_e32 v2, s8, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB7_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %val = atomicrmw volatile max ptr addrspace(1) %ptr, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_offset(ptr addrspace(1) %out, i32 %in) {
-; SI-LABEL: atomic_umax_i32_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s3, s[4:5], 0x4
-; SI-NEXT:    s_mov_b64 s[0:1], 0
-; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:  .LBB8_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_u32_e32 v0, s2, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; SI-NEXT:    s_cbranch_execnz .LBB8_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i32_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s6, s[0:1], 0x2c
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[8:9], 0x10
-; VI-NEXT:    s_add_u32 s0, s8, 16
-; VI-NEXT:    s_addc_u32 s1, s9, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB8_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_u32_e32 v0, s6, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB8_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i32_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s5, s[2:3], 0x10
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB8_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_max_u32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB8_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
-  %val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
-; SI-LABEL: atomic_umax_i32_ret_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s10, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s2, s[4:5], 0x4
-; SI-NEXT:    s_mov_b64 s[8:9], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB9_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_u32_e32 v0, s10, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_cbranch_execnz .LBB9_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i32_ret_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s10, s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[8:9], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[4:5], 0x10
-; VI-NEXT:    s_add_u32 s0, s4, 16
-; VI-NEXT:    s_addc_u32 s1, s5, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB9_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_u32_e32 v0, s10, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_cbranch_execnz .LBB9_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i32_ret_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s3, s[4:5], 0x10
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:  .LBB9_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_max_u32_e32 v2, s2, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB9_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
-  %val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
-; SI-LABEL: atomic_umax_i32_addr64_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s6, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x4
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB10_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_u32_e32 v0, s6, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB10_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i32_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s6, s[0:1], 0x2c
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x10
-; VI-NEXT:    s_add_u32 s0, s0, 16
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_addc_u32 s1, s1, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB10_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_u32_e32 v0, s6, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB10_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i32_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s6, s0
-; GFX9-NEXT:    s_addc_u32 s1, s7, s1
-; GFX9-NEXT:    s_load_dword s5, s[0:1], 0x10
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB10_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_max_u32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB10_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
-  %val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
-; SI-LABEL: atomic_umax_i32_ret_addr64_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s8, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x4
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB11_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_u32_e32 v0, s8, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB11_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i32_ret_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s8, s[0:1], 0x34
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x10
-; VI-NEXT:    s_add_u32 s0, s0, 16
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_addc_u32 s1, s1, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB11_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_u32_e32 v0, s8, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB11_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i32_ret_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x10
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-NEXT:  .LBB11_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_max_u32_e32 v2, s8, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB11_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
-  %val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32(ptr addrspace(1) %out, i32 %in) {
-; SI-LABEL: atomic_umax_i32:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s3, s[4:5], 0x0
-; SI-NEXT:    s_mov_b64 s[0:1], 0
-; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:  .LBB12_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_u32_e32 v0, s2, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; SI-NEXT:    s_cbranch_execnz .LBB12_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i32:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s3, s[4:5], 0x0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:  .LBB12_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_u32_e32 v0, s2, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB12_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i32:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s5, s[2:3], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB12_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_max_u32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB12_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile umax ptr addrspace(1) %out, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
-; SI-LABEL: atomic_umax_i32_ret:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s10, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s2, s[4:5], 0x0
-; SI-NEXT:    s_mov_b64 s[8:9], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB13_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_u32_e32 v0, s10, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_cbranch_execnz .LBB13_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i32_ret:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s10, s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[8:9], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[4:5], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB13_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_u32_e32 v0, s10, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_cbranch_execnz .LBB13_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i32_ret:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s3, s[4:5], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:  .LBB13_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_max_u32_e32 v2, s2, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB13_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile umax ptr addrspace(1) %out, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
-; SI-LABEL: atomic_umax_i32_addr64:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s6, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB14_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_u32_e32 v0, s6, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB14_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i32_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s6, s[0:1], 0x2c
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB14_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_u32_e32 v0, s6, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB14_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i32_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s6, s0
-; GFX9-NEXT:    s_addc_u32 s1, s7, s1
-; GFX9-NEXT:    s_load_dword s5, s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB14_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_max_u32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB14_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %val = atomicrmw volatile umax ptr addrspace(1) %ptr, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umax_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
-; SI-LABEL: atomic_umax_i32_ret_addr64:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s8, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB15_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_max_u32_e32 v0, s8, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB15_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umax_i32_ret_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s8, s[0:1], 0x34
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB15_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_max_u32_e32 v0, s8, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB15_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umax_i32_ret_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-NEXT:  .LBB15_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_max_u32_e32 v2, s8, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB15_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %val = atomicrmw volatile umax ptr addrspace(1) %ptr, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_offset(ptr addrspace(1) %out, i32 %in) {
-; SI-LABEL: atomic_min_i32_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s3, s[4:5], 0x4
-; SI-NEXT:    s_mov_b64 s[0:1], 0
-; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:  .LBB16_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_i32_e32 v0, s2, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; SI-NEXT:    s_cbranch_execnz .LBB16_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i32_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s6, s[0:1], 0x2c
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[8:9], 0x10
-; VI-NEXT:    s_add_u32 s0, s8, 16
-; VI-NEXT:    s_addc_u32 s1, s9, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB16_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_i32_e32 v0, s6, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB16_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i32_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s5, s[2:3], 0x10
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB16_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_min_i32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB16_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
-  %val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
-; SI-LABEL: atomic_min_i32_ret_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s10, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s2, s[4:5], 0x4
-; SI-NEXT:    s_mov_b64 s[8:9], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB17_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_i32_e32 v0, s10, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_cbranch_execnz .LBB17_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i32_ret_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s10, s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[8:9], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[4:5], 0x10
-; VI-NEXT:    s_add_u32 s0, s4, 16
-; VI-NEXT:    s_addc_u32 s1, s5, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB17_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_i32_e32 v0, s10, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_cbranch_execnz .LBB17_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i32_ret_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s3, s[4:5], 0x10
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:  .LBB17_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_min_i32_e32 v2, s2, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB17_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
-  %val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
-; SI-LABEL: atomic_min_i32_addr64_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s6, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x4
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB18_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_i32_e32 v0, s6, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB18_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i32_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s6, s[0:1], 0x2c
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x10
-; VI-NEXT:    s_add_u32 s0, s0, 16
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_addc_u32 s1, s1, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB18_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_i32_e32 v0, s6, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB18_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i32_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s6, s0
-; GFX9-NEXT:    s_addc_u32 s1, s7, s1
-; GFX9-NEXT:    s_load_dword s5, s[0:1], 0x10
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB18_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_min_i32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB18_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
-  %val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
-; SI-LABEL: atomic_min_i32_ret_addr64_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s8, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x4
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB19_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_i32_e32 v0, s8, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB19_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i32_ret_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s8, s[0:1], 0x34
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x10
-; VI-NEXT:    s_add_u32 s0, s0, 16
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_addc_u32 s1, s1, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB19_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_i32_e32 v0, s8, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB19_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i32_ret_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x10
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-NEXT:  .LBB19_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_min_i32_e32 v2, s8, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB19_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
-  %val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32(ptr addrspace(1) %out, i32 %in) {
-; SI-LABEL: atomic_min_i32:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s3, s[4:5], 0x0
-; SI-NEXT:    s_mov_b64 s[0:1], 0
-; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:  .LBB20_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_i32_e32 v0, s2, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; SI-NEXT:    s_cbranch_execnz .LBB20_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i32:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s3, s[4:5], 0x0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:  .LBB20_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_i32_e32 v0, s2, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB20_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i32:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s5, s[2:3], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB20_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_min_i32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB20_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile min ptr addrspace(1) %out, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
-; SI-LABEL: atomic_min_i32_ret:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s10, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s2, s[4:5], 0x0
-; SI-NEXT:    s_mov_b64 s[8:9], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB21_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_i32_e32 v0, s10, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_cbranch_execnz .LBB21_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i32_ret:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s10, s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[8:9], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[4:5], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB21_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_i32_e32 v0, s10, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_cbranch_execnz .LBB21_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i32_ret:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s3, s[4:5], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:  .LBB21_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_min_i32_e32 v2, s2, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB21_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile min ptr addrspace(1) %out, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
-; SI-LABEL: atomic_min_i32_addr64:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s6, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB22_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_i32_e32 v0, s6, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB22_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i32_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s6, s[0:1], 0x2c
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB22_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_i32_e32 v0, s6, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB22_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i32_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s6, s0
-; GFX9-NEXT:    s_addc_u32 s1, s7, s1
-; GFX9-NEXT:    s_load_dword s5, s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB22_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_min_i32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB22_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %val = atomicrmw volatile min ptr addrspace(1) %ptr, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_min_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
-; SI-LABEL: atomic_min_i32_ret_addr64:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s8, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB23_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_i32_e32 v0, s8, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB23_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_min_i32_ret_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s8, s[0:1], 0x34
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB23_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_i32_e32 v0, s8, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB23_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_min_i32_ret_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-NEXT:  .LBB23_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_min_i32_e32 v2, s8, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB23_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %val = atomicrmw volatile min ptr addrspace(1) %ptr, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_offset(ptr addrspace(1) %out, i32 %in) {
-; SI-LABEL: atomic_umin_i32_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s3, s[4:5], 0x4
-; SI-NEXT:    s_mov_b64 s[0:1], 0
-; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:  .LBB24_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_u32_e32 v0, s2, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; SI-NEXT:    s_cbranch_execnz .LBB24_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i32_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s6, s[0:1], 0x2c
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[8:9], 0x10
-; VI-NEXT:    s_add_u32 s0, s8, 16
-; VI-NEXT:    s_addc_u32 s1, s9, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB24_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_u32_e32 v0, s6, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB24_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i32_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s5, s[2:3], 0x10
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB24_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_min_u32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB24_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
-  %val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
-; SI-LABEL: atomic_umin_i32_ret_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s10, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s2, s[4:5], 0x4
-; SI-NEXT:    s_mov_b64 s[8:9], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB25_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_u32_e32 v0, s10, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_cbranch_execnz .LBB25_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i32_ret_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s10, s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[8:9], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[4:5], 0x10
-; VI-NEXT:    s_add_u32 s0, s4, 16
-; VI-NEXT:    s_addc_u32 s1, s5, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB25_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_u32_e32 v0, s10, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_cbranch_execnz .LBB25_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i32_ret_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s3, s[4:5], 0x10
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:  .LBB25_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_min_u32_e32 v2, s2, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB25_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
-  %val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
-; SI-LABEL: atomic_umin_i32_addr64_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s6, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x4
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB26_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_u32_e32 v0, s6, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB26_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i32_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s6, s[0:1], 0x2c
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x10
-; VI-NEXT:    s_add_u32 s0, s0, 16
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_addc_u32 s1, s1, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB26_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_u32_e32 v0, s6, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB26_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i32_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s6, s0
-; GFX9-NEXT:    s_addc_u32 s1, s7, s1
-; GFX9-NEXT:    s_load_dword s5, s[0:1], 0x10
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB26_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_min_u32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB26_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
-  %val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
-; SI-LABEL: atomic_umin_i32_ret_addr64_offset:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s8, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x4
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB27_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_u32_e32 v0, s8, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB27_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i32_ret_addr64_offset:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s8, s[0:1], 0x34
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x10
-; VI-NEXT:    s_add_u32 s0, s0, 16
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_addc_u32 s1, s1, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB27_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_u32_e32 v0, s8, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB27_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i32_ret_addr64_offset:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x10
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-NEXT:  .LBB27_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_min_u32_e32 v2, s8, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB27_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
-  %val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32(ptr addrspace(1) %out, i32 %in) {
-; SI-LABEL: atomic_umin_i32:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s3, s[4:5], 0x0
-; SI-NEXT:    s_mov_b64 s[0:1], 0
-; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:  .LBB28_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_u32_e32 v0, s2, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; SI-NEXT:    s_cbranch_execnz .LBB28_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i32:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
-; VI-NEXT:    s_mov_b64 s[0:1], 0
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s3, s[4:5], 0x0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:  .LBB28_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_u32_e32 v0, s2, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT:    s_cbranch_execnz .LBB28_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i32:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s5, s[2:3], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB28_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_min_u32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB28_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile umin ptr addrspace(1) %out, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
-; SI-LABEL: atomic_umin_i32_ret:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s10, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_load_dword s2, s[4:5], 0x0
-; SI-NEXT:    s_mov_b64 s[8:9], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB29_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_u32_e32 v0, s10, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_cbranch_execnz .LBB29_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i32_ret:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s10, s[0:1], 0x34
-; VI-NEXT:    s_mov_b64 s[8:9], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[4:5], 0x0
-; VI-NEXT:    s_mov_b64 s[0:1], s[4:5]
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB29_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_u32_e32 v0, s10, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_cbranch_execnz .LBB29_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[8:9]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i32_ret:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x34
-; GFX9-NEXT:    s_mov_b64 s[0:1], 0
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s3, s[4:5], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:  .LBB29_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_min_u32_e32 v2, s2, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    s_cbranch_execnz .LBB29_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %val = atomicrmw volatile umin ptr addrspace(1) %out, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
-; SI-LABEL: atomic_umin_i32_addr64:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s6, s[0:1], 0xb
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB30_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_u32_e32 v0, s6, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB30_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i32_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s6, s[0:1], 0x2c
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB30_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_u32_e32 v0, s6, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB30_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i32_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s6, s0
-; GFX9-NEXT:    s_addc_u32 s1, s7, s1
-; GFX9-NEXT:    s_load_dword s5, s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:  .LBB30_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_min_u32_e32 v0, s4, v1
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB30_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %val = atomicrmw volatile umin ptr addrspace(1) %ptr, i32 %in seq_cst
-  ret void
-}
-
-define amdgpu_kernel void @atomic_umin_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
-; SI-LABEL: atomic_umin_i32_ret_addr64:
-; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xf
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s8, s[0:1], 0xd
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; SI-NEXT:    s_add_u32 s0, s4, s0
-; SI-NEXT:    s_addc_u32 s1, s5, s1
-; SI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:  .LBB31_1: ; %atomicrmw.start
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    v_min_u32_e32 v0, s8, v1
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, v1
-; SI-NEXT:    v_mov_b32_e32 v2, v0
-; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; SI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, v2
-; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execnz .LBB31_1
-; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: atomic_umin_i32_ret_addr64:
-; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_load_dword s8, s[0:1], 0x34
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; VI-NEXT:    s_add_u32 s0, s4, s0
-; VI-NEXT:    s_addc_u32 s1, s5, s1
-; VI-NEXT:    s_load_dword s2, s[0:1], 0x0
-; VI-NEXT:    s_mov_b64 s[4:5], 0
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:  .LBB31_1: ; %atomicrmw.start
-; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; VI-NEXT:    v_min_u32_e32 v0, s8, v1
-; VI-NEXT:    v_mov_b32_e32 v3, v1
-; VI-NEXT:    v_mov_b32_e32 v2, v0
-; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_wbinvl1_vol
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
-; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT:    v_mov_b32_e32 v1, v2
-; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_cbranch_execnz .LBB31_1
-; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
-; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    buffer_store_dword v2, off, s[0:3], 0
-; VI-NEXT:    s_endpgm
-;
-; GFX9-LABEL: atomic_umin_i32_ret_addr64:
-; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x3c
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dword s8, s[0:1], 0x34
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], s[2:3], 2
-; GFX9-NEXT:    s_add_u32 s0, s4, s0
-; GFX9-NEXT:    s_addc_u32 s1, s5, s1
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x0
-; GFX9-NEXT:    s_mov_b64 s[2:3], 0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-NEXT:  .LBB31_1: ; %atomicrmw.start
-; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mov_b32_e32 v3, v0
-; GFX9-NEXT:    v_min_u32_e32 v2, s8, v3
-; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    buffer_wbinvl1_vol
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
-; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    s_cbranch_execnz .LBB31_1
-; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT:    s_or_b64 exec, exec, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_store_dword v1, v0, s[6:7]
-; GFX9-NEXT:    s_endpgm
-entry:
-  %ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
-  %val = atomicrmw volatile umin ptr addrspace(1) %ptr, i32 %in seq_cst
-  store i32 %val, ptr addrspace(1) %out2
-  ret void
-}

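The new expand-atomic-i16-system.ll checks below all exercise the same part-word lowering: AtomicExpand rewrites the i16 access as an operation on its containing aligned 32-bit word, selecting the sub-word lane with a computed mask and shift and retrying the update through a cmpxchg loop. As a reading aid, here is a minimal hand-written restatement of that shape for the xchg case; the function name sketch_i16_xchg_via_i32_cas is invented for illustration, and the pattern itself is lifted directly from the autogenerated CHECK lines that follow, not copied from the patch.

; Hand-written sketch of the part-word xchg expansion the checks verify.
define i16 @sketch_i16_xchg_via_i32_cas(ptr addrspace(1) %ptr, i16 %value) {
entry:
  ; Round the pointer down to the containing 4-byte word.
  %aligned = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) %ptr, i64 -4)
  %int = ptrtoint ptr addrspace(1) %ptr to i64
  %lsb = and i64 %int, 3
  %bits = shl i64 %lsb, 3                 ; byte offset within the word -> bit offset
  %shift = trunc i64 %bits to i32
  %mask = shl i32 65535, %shift           ; lane occupied by the i16 within the word
  %inv = xor i32 %mask, -1
  %wide = zext i16 %value to i32
  %new.shifted = shl i32 %wide, %shift    ; operand moved into its lane
  %orig = load i32, ptr addrspace(1) %aligned, align 4
  br label %loop
loop:
  %loaded = phi i32 [ %orig, %entry ], [ %observed, %loop ]
  %keep = and i32 %loaded, %inv           ; preserve the other half of the word
  %desired = or i32 %keep, %new.shifted
  %pair = cmpxchg ptr addrspace(1) %aligned, i32 %loaded, i32 %desired seq_cst seq_cst, align 4
  %observed = extractvalue { i32, i1 } %pair, 0
  %ok = extractvalue { i32, i1 } %pair, 1
  br i1 %ok, label %done, label %loop     ; retry if another thread raced the word
done:
  %res.shifted = lshr i32 %observed, %shift
  %res = trunc i32 %res.shifted to i16    ; extract the old i16 from its lane
  ret i16 %res
}

declare ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1), i64)

The align-4 variants skip the ptrmask/shift computation entirely (the lane is the low half-word, so constant masks -65536 and 65535 suffice), and the and/or/xor cases avoid the loop altogether by folding the lane mask into the operand of a full-word atomicrmw.
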
diff  --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
new file mode 100644
index 00000000000000..fce2eeef342b70
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
@@ -0,0 +1,696 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s
+
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+
+define i16 @test_atomicrmw_xchg_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_xchg_i16_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP6:%.*]] = or i32 [[TMP5]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[TMP7:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP6]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %res = atomicrmw xchg ptr addrspace(1) %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_xchg_i16_global_system_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_xchg_i16_global_system_align4(
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[TMP4]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %res = atomicrmw xchg ptr addrspace(1) %ptr, i16 %value seq_cst, align 4
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_add_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_add_i16_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = add i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %res = atomicrmw add ptr addrspace(1) %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_add_i16_global_system_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_add_i16_global_system_align4(
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = add i32 [[LOADED]], [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[NEW]], 65535
+; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[TMP5]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %res = atomicrmw add ptr addrspace(1) %ptr, i16 %value seq_cst, align 4
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_sub_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_i16_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = sub i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %res = atomicrmw sub ptr addrspace(1) %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_and_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[INV_MASK]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] seq_cst, align 4
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %res = atomicrmw and ptr addrspace(1) %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_nand_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_nand_i16_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[NEW:%.*]] = xor i32 [[TMP5]], -1
+; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[NEW]], [[MASK]]
+; CHECK-NEXT:    [[TMP7:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP8:%.*]] = or i32 [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    [[TMP9:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP8]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP9]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP9]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %res = atomicrmw nand ptr addrspace(1) %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_or_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_or_i16_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr addrspace(1) [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst, align 4
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %res = atomicrmw or ptr addrspace(1) %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_xor_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_xor_i16_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr addrspace(1) [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst, align 4
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %res = atomicrmw xor ptr addrspace(1) %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_max_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_max_i16_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sgt i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP4]], i16 [[EXTRACTED]], i16 [[VALUE]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
+;
+  %res = atomicrmw max ptr addrspace(1) %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_min_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_min_i16_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sle i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP4]], i16 [[EXTRACTED]], i16 [[VALUE]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
+;
+  %res = atomicrmw min ptr addrspace(1) %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_umax_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_umax_i16_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ugt i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP4]], i16 [[EXTRACTED]], i16 [[VALUE]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
+;
+  %res = atomicrmw umax ptr addrspace(1) %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_umin_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_umin_i16_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ule i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP4]], i16 [[EXTRACTED]], i16 [[VALUE]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
+;
+  %res = atomicrmw umin ptr addrspace(1) %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_cmpxchg_i16_global_system(ptr addrspace(1) %out, i16 %in, i16 %old) {
+; CHECK-LABEL: @test_cmpxchg_i16_global_system(
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i16, ptr addrspace(1) [[OUT:%.*]], i64 4
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[GEP]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[GEP]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[IN:%.*]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP5:%.*]] = zext i16 [[OLD:%.*]] to i32
+; CHECK-NEXT:    [[TMP6:%.*]] = shl i32 [[TMP5]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = and i32 [[TMP7]], [[INV_MASK]]
+; CHECK-NEXT:    br label [[PARTWORD_CMPXCHG_LOOP:%.*]]
+; CHECK:       partword.cmpxchg.loop:
+; CHECK-NEXT:    [[TMP9:%.*]] = phi i32 [ [[TMP8]], [[TMP0:%.*]] ], [ [[TMP15:%.*]], [[PARTWORD_CMPXCHG_FAILURE:%.*]] ]
+; CHECK-NEXT:    [[TMP10:%.*]] = or i32 [[TMP9]], [[TMP4]]
+; CHECK-NEXT:    [[TMP11:%.*]] = or i32 [[TMP9]], [[TMP6]]
+; CHECK-NEXT:    [[TMP12:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[TMP11]], i32 [[TMP10]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = extractvalue { i32, i1 } [[TMP12]], 0
+; CHECK-NEXT:    [[TMP14:%.*]] = extractvalue { i32, i1 } [[TMP12]], 1
+; CHECK-NEXT:    br i1 [[TMP14]], label [[PARTWORD_CMPXCHG_END:%.*]], label [[PARTWORD_CMPXCHG_FAILURE]]
+; CHECK:       partword.cmpxchg.failure:
+; CHECK-NEXT:    [[TMP15]] = and i32 [[TMP13]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP9]], [[TMP15]]
+; CHECK-NEXT:    br i1 [[TMP16]], label [[PARTWORD_CMPXCHG_LOOP]], label [[PARTWORD_CMPXCHG_END]]
+; CHECK:       partword.cmpxchg.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP13]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    [[TMP17:%.*]] = insertvalue { i16, i1 } poison, i16 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = insertvalue { i16, i1 } [[TMP17]], i1 [[TMP14]], 1
+; CHECK-NEXT:    [[EXTRACT:%.*]] = extractvalue { i16, i1 } [[TMP18]], 0
+; CHECK-NEXT:    ret i16 [[EXTRACT]]
+;
+  %gep = getelementptr i16, ptr addrspace(1) %out, i64 4
+  %res = cmpxchg ptr addrspace(1) %gep, i16 %old, i16 %in seq_cst seq_cst
+  %extract = extractvalue {i16, i1} %res, 0
+  ret i16 %extract
+}
+
+define i16 @test_cmpxchg_i16_global_system_align4(ptr addrspace(1) %out, i16 %in, i16 %old) {
+; CHECK-LABEL: @test_cmpxchg_i16_global_system_align4(
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i16, ptr addrspace(1) [[OUT:%.*]], i64 4
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[IN:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[OLD:%.*]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[GEP]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[TMP3]], -65536
+; CHECK-NEXT:    br label [[PARTWORD_CMPXCHG_LOOP:%.*]]
+; CHECK:       partword.cmpxchg.loop:
+; CHECK-NEXT:    [[TMP5:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[TMP11:%.*]], [[PARTWORD_CMPXCHG_FAILURE:%.*]] ]
+; CHECK-NEXT:    [[TMP6:%.*]] = or i32 [[TMP5]], [[TMP1]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP5]], [[TMP2]]
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[GEP]], i32 [[TMP7]], i32 [[TMP6]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT:    [[TMP10:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT:    br i1 [[TMP10]], label [[PARTWORD_CMPXCHG_END:%.*]], label [[PARTWORD_CMPXCHG_FAILURE]]
+; CHECK:       partword.cmpxchg.failure:
+; CHECK-NEXT:    [[TMP11]] = and i32 [[TMP9]], -65536
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP5]], [[TMP11]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[PARTWORD_CMPXCHG_LOOP]], label [[PARTWORD_CMPXCHG_END]]
+; CHECK:       partword.cmpxchg.end:
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[TMP9]] to i16
+; CHECK-NEXT:    [[TMP13:%.*]] = insertvalue { i16, i1 } poison, i16 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP14:%.*]] = insertvalue { i16, i1 } [[TMP13]], i1 [[TMP10]], 1
+; CHECK-NEXT:    [[EXTRACT:%.*]] = extractvalue { i16, i1 } [[TMP14]], 0
+; CHECK-NEXT:    ret i16 [[EXTRACT]]
+;
+  %gep = getelementptr i16, ptr addrspace(1) %out, i64 4
+  %res = cmpxchg ptr addrspace(1) %gep, i16 %old, i16 %in seq_cst seq_cst, align 4
+  %extract = extractvalue {i16, i1} %res, 0
+  ret i16 %extract
+}
+
+define i16 @test_atomicrmw_inc_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i16_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    [[TMP4:%.*]] = add i16 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP5]], i16 0, i16 [[TMP4]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
+;
+  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_inc_i16_global_system_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i16_global_system_align4(
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT:    [[TMP2:%.*]] = add i16 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP3]], i16 0, i16 [[TMP2]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED1]]
+;
+  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i16 %value seq_cst, align 4
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_inc_i16_flat_system(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i16_flat_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    [[TMP4:%.*]] = add i16 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP5]], i16 0, i16 [[TMP4]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
+;
+  %res = atomicrmw uinc_wrap ptr %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_inc_i16_flat_system_align4(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i16_flat_system_align4(
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT:    [[TMP2:%.*]] = add i16 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP3]], i16 0, i16 [[TMP2]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED1]]
+;
+  %res = atomicrmw uinc_wrap ptr %ptr, i16 %value seq_cst, align 4
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_dec_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i16_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    [[TMP4:%.*]] = sub i16 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i16 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ugt i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP7]], i16 [[VALUE]], i16 [[TMP4]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
+;
+  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_dec_i16_global_system_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i16_global_system_align4(
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT:    [[TMP2:%.*]] = sub i16 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i16 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ugt i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP5]], i16 [[VALUE]], i16 [[TMP2]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED1]]
+;
+  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i16 %value seq_cst, align 4
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_dec_i16_flat_system(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i16_flat_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    [[TMP4:%.*]] = sub i16 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i16 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ugt i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP7]], i16 [[VALUE]], i16 [[TMP4]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
+;
+  %res = atomicrmw udec_wrap ptr %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+define i16 @test_atomicrmw_dec_i16_flat_system_align4(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i16_flat_system_align4(
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT:    [[TMP2:%.*]] = sub i16 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i16 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ugt i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP5]], i16 [[VALUE]], i16 [[TMP2]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT:    ret i16 [[EXTRACTED1]]
+;
+  %res = atomicrmw udec_wrap ptr %ptr, i16 %value seq_cst, align 4
+  ret i16 %res
+}

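(Aside, not part of the commit: every i16 system-scope test in the new file above encodes the same partword expansion. AtomicExpand emits it because the target lacks sub-32-bit global atomics: the pointer is rounded down to the containing 32-bit word, the halfword's bit offset and mask come from the low pointer bits, and the operation retries in a 32-bit cmpxchg loop. A minimal C11 sketch of that pattern, using uinc_wrap as the sample operation -- the function name and types are illustrative only:

  #include <stdatomic.h>
  #include <stdint.h>

  /* Sketch: 16-bit uinc_wrap emulated on the containing 32-bit word,
     mirroring [[ALIGNEDADDR]]/[[SHIFTAMT]]/[[MASK]] in the IR above. */
  static uint16_t uinc_wrap_u16(uint16_t *ptr, uint16_t value) {
    _Atomic uint32_t *aligned =                    /* AlignedAddr */
        (_Atomic uint32_t *)((uintptr_t)ptr & ~(uintptr_t)3);
    unsigned shift = ((uintptr_t)ptr & 3u) * 8u;   /* ShiftAmt */
    uint32_t mask = (uint32_t)0xffffu << shift;    /* Mask */

    uint32_t loaded = atomic_load_explicit(aligned, memory_order_relaxed);
    for (;;) {
      uint16_t old16 = (uint16_t)(loaded >> shift);            /* extracted */
      /* uinc_wrap: old >= value ? 0 : old + 1 */
      uint16_t new16 = (old16 >= value) ? 0 : (uint16_t)(old16 + 1);
      /* Re-insert the halfword without disturbing the neighbors. */
      uint32_t inserted = (loaded & ~mask) | ((uint32_t)new16 << shift);
      if (atomic_compare_exchange_strong_explicit(
              aligned, &loaded, inserted,
              memory_order_seq_cst, memory_order_seq_cst))
        return old16;   /* atomicrmw yields the old value */
      /* On failure `loaded` was refreshed with the observed word; retry. */
    }
  }

The loop commits only the masked halfword, leaving the neighboring bytes untouched, and returns the pre-exchange value -- that is what the final lshr/trunc pair in atomicrmw.end extracts.)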
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
index d8090af143ec09..06b20a3089492b 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
@@ -4,8 +4,8 @@
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
-define i16 @test_atomicrmw_xchg_i16_global(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_xchg_i16_global(
+define i16 @test_atomicrmw_xchg_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_xchg_i16_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -21,7 +21,7 @@ define i16 @test_atomicrmw_xchg_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = or i32 [[TMP5]], [[VALOPERAND_SHIFTED]]
-; CHECK-NEXT:    [[TMP7:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP6]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP6]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -30,12 +30,12 @@ define i16 @test_atomicrmw_xchg_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED]]
 ;
-  %res = atomicrmw xchg ptr addrspace(1) %ptr, i16 %value seq_cst
+  %res = atomicrmw xchg ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_xchg_i16_global_align4(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_xchg_i16_global_align4(
+define i16 @test_atomicrmw_xchg_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_xchg_i16_global_agent_align4(
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
@@ -43,7 +43,7 @@ define i16 @test_atomicrmw_xchg_i16_global_align4(ptr addrspace(1) %ptr, i16 %va
 ; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
 ; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[LOADED]], -65536
 ; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[TMP4]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[TMP4]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -51,12 +51,12 @@ define i16 @test_atomicrmw_xchg_i16_global_align4(ptr addrspace(1) %ptr, i16 %va
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED]]
 ;
-  %res = atomicrmw xchg ptr addrspace(1) %ptr, i16 %value seq_cst, align 4
+  %res = atomicrmw xchg ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_add_i16_global(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_add_i16_global(
+define i16 @test_atomicrmw_add_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_add_i16_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -74,7 +74,7 @@ define i16 @test_atomicrmw_add_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -83,12 +83,12 @@ define i16 @test_atomicrmw_add_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED]]
 ;
-  %res = atomicrmw add ptr addrspace(1) %ptr, i16 %value seq_cst
+  %res = atomicrmw add ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_add_i16_global_align4(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_add_i16_global_align4(
+define i16 @test_atomicrmw_add_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_add_i16_global_agent_align4(
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
@@ -98,7 +98,7 @@ define i16 @test_atomicrmw_add_i16_global_align4(ptr addrspace(1) %ptr, i16 %val
 ; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[NEW]], 65535
 ; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[LOADED]], -65536
 ; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[TMP5]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[TMP5]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -106,12 +106,12 @@ define i16 @test_atomicrmw_add_i16_global_align4(ptr addrspace(1) %ptr, i16 %val
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED]]
 ;
-  %res = atomicrmw add ptr addrspace(1) %ptr, i16 %value seq_cst, align 4
+  %res = atomicrmw add ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_sub_i16_global(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_sub_i16_global(
+define i16 @test_atomicrmw_sub_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_i16_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -129,7 +129,7 @@ define i16 @test_atomicrmw_sub_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -138,12 +138,12 @@ define i16 @test_atomicrmw_sub_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED]]
 ;
-  %res = atomicrmw sub ptr addrspace(1) %ptr, i16 %value seq_cst
+  %res = atomicrmw sub ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_and_i16_global(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global(
+define i16 @test_atomicrmw_and_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -159,12 +159,12 @@ define i16 @test_atomicrmw_and_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED]]
 ;
-  %res = atomicrmw and ptr addrspace(1) %ptr, i16 %value seq_cst
+  %res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_nand_i16_global(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_nand_i16_global(
+define i16 @test_atomicrmw_nand_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_nand_i16_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -183,7 +183,7 @@ define i16 @test_atomicrmw_nand_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[NEW]], [[MASK]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = or i32 [[TMP7]], [[TMP6]]
-; CHECK-NEXT:    [[TMP9:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP8]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP8]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP9]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP9]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -192,12 +192,12 @@ define i16 @test_atomicrmw_nand_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED]]
 ;
-  %res = atomicrmw nand ptr addrspace(1) %ptr, i16 %value seq_cst
+  %res = atomicrmw nand ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_or_i16_global(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_or_i16_global(
+define i16 @test_atomicrmw_or_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_or_i16_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -212,12 +212,12 @@ define i16 @test_atomicrmw_or_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED]]
 ;
-  %res = atomicrmw or ptr addrspace(1) %ptr, i16 %value seq_cst
+  %res = atomicrmw or ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_xor_i16_global(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_xor_i16_global(
+define i16 @test_atomicrmw_xor_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_xor_i16_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -232,12 +232,12 @@ define i16 @test_atomicrmw_xor_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED]]
 ;
-  %res = atomicrmw xor ptr addrspace(1) %ptr, i16 %value seq_cst
+  %res = atomicrmw xor ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_max_i16_global(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_max_i16_global(
+define i16 @test_atomicrmw_max_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_max_i16_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -257,7 +257,7 @@ define i16 @test_atomicrmw_max_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -266,12 +266,12 @@ define i16 @test_atomicrmw_max_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
 ;
-  %res = atomicrmw max ptr addrspace(1) %ptr, i16 %value seq_cst
+  %res = atomicrmw max ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_min_i16_global(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_min_i16_global(
+define i16 @test_atomicrmw_min_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_min_i16_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -291,7 +291,7 @@ define i16 @test_atomicrmw_min_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -300,12 +300,12 @@ define i16 @test_atomicrmw_min_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
 ;
-  %res = atomicrmw min ptr addrspace(1) %ptr, i16 %value seq_cst
+  %res = atomicrmw min ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_umax_i16_global(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_umax_i16_global(
+define i16 @test_atomicrmw_umax_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_umax_i16_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -325,7 +325,7 @@ define i16 @test_atomicrmw_umax_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -334,12 +334,12 @@ define i16 @test_atomicrmw_umax_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
 ;
-  %res = atomicrmw umax ptr addrspace(1) %ptr, i16 %value seq_cst
+  %res = atomicrmw umax ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_umin_i16_global(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_umin_i16_global(
+define i16 @test_atomicrmw_umin_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_umin_i16_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -359,7 +359,7 @@ define i16 @test_atomicrmw_umin_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -368,12 +368,12 @@ define i16 @test_atomicrmw_umin_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
 ;
-  %res = atomicrmw umin ptr addrspace(1) %ptr, i16 %value seq_cst
+  %res = atomicrmw umin ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_cmpxchg_i16_global(ptr addrspace(1) %out, i16 %in, i16 %old) {
-; CHECK-LABEL: @test_cmpxchg_i16_global(
+define i16 @test_cmpxchg_i16_global_agent(ptr addrspace(1) %out, i16 %in, i16 %old) {
+; CHECK-LABEL: @test_cmpxchg_i16_global_agent(
 ; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i16, ptr addrspace(1) [[OUT:%.*]], i64 4
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[GEP]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[GEP]] to i64
@@ -415,8 +415,8 @@ define i16 @test_cmpxchg_i16_global(ptr addrspace(1) %out, i16 %in, i16 %old) {
   ret i16 %extract
 }
 
-define i16 @test_cmpxchg_i16_global_align4(ptr addrspace(1) %out, i16 %in, i16 %old) {
-; CHECK-LABEL: @test_cmpxchg_i16_global_align4(
+define i16 @test_cmpxchg_i16_global_agent_align4(ptr addrspace(1) %out, i16 %in, i16 %old) {
+; CHECK-LABEL: @test_cmpxchg_i16_global_agent_align4(
 ; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i16, ptr addrspace(1) [[OUT:%.*]], i64 4
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[IN:%.*]] to i32
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[OLD:%.*]] to i32
@@ -530,8 +530,8 @@ define i16 @test_atomicrmw_xor_i16_local_align4(ptr addrspace(3) %ptr, i16 %valu
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_inc_i16_global(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_inc_i16_global(
+define i16 @test_atomicrmw_inc_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i16_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -552,7 +552,7 @@ define i16 @test_atomicrmw_inc_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -561,12 +561,12 @@ define i16 @test_atomicrmw_inc_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
 ;
-  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i16 %value seq_cst
+  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_inc_i16_global_align4(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_inc_i16_global_align4(
+define i16 @test_atomicrmw_inc_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i16_global_agent_align4(
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
 ; CHECK:       atomicrmw.start:
@@ -578,7 +578,7 @@ define i16 @test_atomicrmw_inc_i16_global_align4(ptr addrspace(1) %ptr, i16 %val
 ; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
-; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -586,7 +586,7 @@ define i16 @test_atomicrmw_inc_i16_global_align4(ptr addrspace(1) %ptr, i16 %val
 ; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED1]]
 ;
-  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i16 %value seq_cst, align 4
+  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
   ret i16 %res
 }
 
@@ -611,7 +611,7 @@ define i16 @test_atomicrmw_inc_i16_local(ptr addrspace(3) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -620,7 +620,7 @@ define i16 @test_atomicrmw_inc_i16_local(ptr addrspace(3) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
 ;
-  %res = atomicrmw uinc_wrap ptr addrspace(3) %ptr, i16 %value seq_cst
+  %res = atomicrmw uinc_wrap ptr addrspace(3) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
@@ -637,7 +637,7 @@ define i16 @test_atomicrmw_inc_i16_local_align4(ptr addrspace(3) %ptr, i16 %valu
 ; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
-; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr addrspace(3) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr addrspace(3) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -645,12 +645,12 @@ define i16 @test_atomicrmw_inc_i16_local_align4(ptr addrspace(3) %ptr, i16 %valu
 ; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED1]]
 ;
-  %res = atomicrmw uinc_wrap ptr addrspace(3) %ptr, i16 %value seq_cst, align 4
+  %res = atomicrmw uinc_wrap ptr addrspace(3) %ptr, i16 %value syncscope("agent") seq_cst, align 4
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_inc_i16_flat(ptr %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_inc_i16_flat(
+define i16 @test_atomicrmw_inc_i16_flat_agent(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i16_flat_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -671,7 +671,7 @@ define i16 @test_atomicrmw_inc_i16_flat(ptr %ptr, i16 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -680,12 +680,12 @@ define i16 @test_atomicrmw_inc_i16_flat(ptr %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
 ;
-  %res = atomicrmw uinc_wrap ptr %ptr, i16 %value seq_cst
+  %res = atomicrmw uinc_wrap ptr %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_inc_i16_flat_align4(ptr %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_inc_i16_flat_align4(
+define i16 @test_atomicrmw_inc_i16_flat_agent_align4(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i16_flat_agent_align4(
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
 ; CHECK:       atomicrmw.start:
@@ -697,7 +697,7 @@ define i16 @test_atomicrmw_inc_i16_flat_align4(ptr %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
-; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -705,12 +705,12 @@ define i16 @test_atomicrmw_inc_i16_flat_align4(ptr %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED1]]
 ;
-  %res = atomicrmw uinc_wrap ptr %ptr, i16 %value seq_cst, align 4
+  %res = atomicrmw uinc_wrap ptr %ptr, i16 %value syncscope("agent") seq_cst, align 4
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_dec_i16_global(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_dec_i16_global(
+define i16 @test_atomicrmw_dec_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i16_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -733,7 +733,7 @@ define i16 @test_atomicrmw_dec_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -742,12 +742,12 @@ define i16 @test_atomicrmw_dec_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
 ;
-  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i16 %value seq_cst
+  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_dec_i16_global_align4(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_dec_i16_global_align4(
+define i16 @test_atomicrmw_dec_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i16_global_agent_align4(
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
 ; CHECK:       atomicrmw.start:
@@ -761,7 +761,7 @@ define i16 @test_atomicrmw_dec_i16_global_align4(ptr addrspace(1) %ptr, i16 %val
 ; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
-; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -769,7 +769,7 @@ define i16 @test_atomicrmw_dec_i16_global_align4(ptr addrspace(1) %ptr, i16 %val
 ; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED1]]
 ;
-  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i16 %value seq_cst, align 4
+  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
   ret i16 %res
 }
 
@@ -836,8 +836,8 @@ define i16 @test_atomicrmw_dec_i16_local_align4(ptr addrspace(3) %ptr, i16 %valu
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_dec_i16_flat(ptr %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_dec_i16_flat(
+define i16 @test_atomicrmw_dec_i16_flat_agent(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i16_flat_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -860,7 +860,7 @@ define i16 @test_atomicrmw_dec_i16_flat(ptr %ptr, i16 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -869,12 +869,12 @@ define i16 @test_atomicrmw_dec_i16_flat(ptr %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
 ;
-  %res = atomicrmw udec_wrap ptr %ptr, i16 %value seq_cst
+  %res = atomicrmw udec_wrap ptr %ptr, i16 %value syncscope("agent") seq_cst
   ret i16 %res
 }
 
-define i16 @test_atomicrmw_dec_i16_flat_align4(ptr %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_dec_i16_flat_align4(
+define i16 @test_atomicrmw_dec_i16_flat_agent_align4(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i16_flat_agent_align4(
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
 ; CHECK:       atomicrmw.start:
@@ -888,7 +888,7 @@ define i16 @test_atomicrmw_dec_i16_flat_align4(ptr %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
-; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -896,6 +896,6 @@ define i16 @test_atomicrmw_dec_i16_flat_align4(ptr %ptr, i16 %value) {
 ; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
 ; CHECK-NEXT:    ret i16 [[EXTRACTED1]]
 ;
-  %res = atomicrmw udec_wrap ptr %ptr, i16 %value seq_cst, align 4
+  %res = atomicrmw udec_wrap ptr %ptr, i16 %value syncscope("agent") seq_cst, align 4
   ret i16 %res
 }

diff  --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
new file mode 100644
index 00000000000000..f796d3cca3036f
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
@@ -0,0 +1,816 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s
+
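+; The checks below show how AtomicExpand lowers system-scope (no syncscope)
+; i8 operations: @llvm.ptrmask rounds the pointer down to its containing
+; 4-byte word, the low two pointer bits (scaled by 8) give the bit shift,
+; and a shifted 0xFF mask selects the target byte within that word.
+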
+define i8 @test_atomicrmw_xchg_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_xchg_i8_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP6:%.*]] = or i32 [[TMP5]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[TMP7:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP6]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %res = atomicrmw xchg ptr addrspace(1) %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_add_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_add_i8_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = add i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %res = atomicrmw add ptr addrspace(1) %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_add_i8_global_system_align2(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_add_i8_global_system_align2(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = add i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %res = atomicrmw add ptr addrspace(1) %ptr, i8 %value seq_cst, align 2
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_add_i8_global_system_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_add_i8_global_system_align4(
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = add i32 [[LOADED]], [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[NEW]], 255
+; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[TMP5]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %res = atomicrmw add ptr addrspace(1) %ptr, i8 %value seq_cst, align 4
+  ret i8 %res
+}
+
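+; At align 4 the i8 is known to occupy the low byte of its word, so the
+; pointer masking and shift computation above fold away and the loop masks
+; with the constants 255 and -256 directly.
+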
+define i8 @test_atomicrmw_sub_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_i8_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = sub i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %res = atomicrmw sub ptr addrspace(1) %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_and_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i8_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[INV_MASK]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] seq_cst, align 4
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %res = atomicrmw and ptr addrspace(1) %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_nand_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_nand_i8_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[NEW:%.*]] = xor i32 [[TMP5]], -1
+; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[NEW]], [[MASK]]
+; CHECK-NEXT:    [[TMP7:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP8:%.*]] = or i32 [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    [[TMP9:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP8]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP9]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP9]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %res = atomicrmw nand ptr addrspace(1) %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_or_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_or_i8_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr addrspace(1) [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst, align 4
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %res = atomicrmw or ptr addrspace(1) %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_xor_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_xor_i8_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr addrspace(1) [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst, align 4
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %res = atomicrmw xor ptr addrspace(1) %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
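+; Unlike nand above, plain and/or/xor fold to a single word-sized atomicrmw:
+; widening the operand (for 'and', by or-ing in the inverted byte mask)
+; leaves the other bytes of the word unchanged, so no retry loop is needed.
+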
+define i8 @test_atomicrmw_max_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_max_i8_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sgt i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP4]], i8 [[EXTRACTED]], i8 [[VALUE]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
+;
+  %res = atomicrmw max ptr addrspace(1) %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_min_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_min_i8_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sle i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP4]], i8 [[EXTRACTED]], i8 [[VALUE]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
+;
+  %res = atomicrmw min ptr addrspace(1) %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_umax_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_umax_i8_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ugt i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP4]], i8 [[EXTRACTED]], i8 [[VALUE]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
+;
+  %res = atomicrmw umax ptr addrspace(1) %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_umin_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_umin_i8_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ule i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP4]], i8 [[EXTRACTED]], i8 [[VALUE]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
+;
+  %res = atomicrmw umin ptr addrspace(1) %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_cmpxchg_i8_global_system(ptr addrspace(1) %out, i8 %in, i8 %old) {
+; CHECK-LABEL: @test_cmpxchg_i8_global_system(
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT:%.*]], i64 4
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[GEP]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[GEP]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i8 [[IN:%.*]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP5:%.*]] = zext i8 [[OLD:%.*]] to i32
+; CHECK-NEXT:    [[TMP6:%.*]] = shl i32 [[TMP5]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = and i32 [[TMP7]], [[INV_MASK]]
+; CHECK-NEXT:    br label [[PARTWORD_CMPXCHG_LOOP:%.*]]
+; CHECK:       partword.cmpxchg.loop:
+; CHECK-NEXT:    [[TMP9:%.*]] = phi i32 [ [[TMP8]], [[TMP0:%.*]] ], [ [[TMP15:%.*]], [[PARTWORD_CMPXCHG_FAILURE:%.*]] ]
+; CHECK-NEXT:    [[TMP10:%.*]] = or i32 [[TMP9]], [[TMP4]]
+; CHECK-NEXT:    [[TMP11:%.*]] = or i32 [[TMP9]], [[TMP6]]
+; CHECK-NEXT:    [[TMP12:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[TMP11]], i32 [[TMP10]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = extractvalue { i32, i1 } [[TMP12]], 0
+; CHECK-NEXT:    [[TMP14:%.*]] = extractvalue { i32, i1 } [[TMP12]], 1
+; CHECK-NEXT:    br i1 [[TMP14]], label [[PARTWORD_CMPXCHG_END:%.*]], label [[PARTWORD_CMPXCHG_FAILURE]]
+; CHECK:       partword.cmpxchg.failure:
+; CHECK-NEXT:    [[TMP15]] = and i32 [[TMP13]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP9]], [[TMP15]]
+; CHECK-NEXT:    br i1 [[TMP16]], label [[PARTWORD_CMPXCHG_LOOP]], label [[PARTWORD_CMPXCHG_END]]
+; CHECK:       partword.cmpxchg.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP13]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP17:%.*]] = insertvalue { i8, i1 } poison, i8 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = insertvalue { i8, i1 } [[TMP17]], i1 [[TMP14]], 1
+; CHECK-NEXT:    [[EXTRACT:%.*]] = extractvalue { i8, i1 } [[TMP18]], 0
+; CHECK-NEXT:    ret i8 [[EXTRACT]]
+;
+  %gep = getelementptr i8, ptr addrspace(1) %out, i64 4
+  %res = cmpxchg ptr addrspace(1) %gep, i8 %old, i8 %in seq_cst seq_cst
+  %extract = extractvalue {i8, i1} %res, 0
+  ret i8 %extract
+}
+
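+; In the part-word cmpxchg expansion above, the loop carries the bytes
+; surrounding the target byte in the expected word; on failure it retries
+; only when those surrounding bytes changed underneath it, and otherwise
+; exits through partword.cmpxchg.end with the comparison's real result.
+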
+define i8 @test_atomicrmw_inc_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i8_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = add i8 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP5]], i8 0, i8 [[TMP4]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
+;
+  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
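+; uinc_wrap adds 1 and wraps to 0 once the old value reaches %value, while
+; udec_wrap subtracts 1 and reloads %value when the old value is 0 or
+; greater than %value, matching the selects in the loops above and below.
+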
+define i8 @test_atomicrmw_inc_i8_global_system_align2(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i8_global_system_align2(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = add i8 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP5]], i8 0, i8 [[TMP4]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
+;
+  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i8 %value seq_cst, align 2
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_inc_i8_global_system_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i8_global_system_align4(
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT:    [[TMP2:%.*]] = add i8 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP3]], i8 0, i8 [[TMP2]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED1]]
+;
+  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i8 %value seq_cst, align 4
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_inc_i8_flat_system(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i8_flat_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = add i8 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP5]], i8 0, i8 [[TMP4]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
+;
+  %res = atomicrmw uinc_wrap ptr %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_inc_i8_flat_system_align2(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i8_flat_system_align2(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = add i8 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP5]], i8 0, i8 [[TMP4]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
+;
+  %res = atomicrmw uinc_wrap ptr %ptr, i8 %value seq_cst, align 2
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_inc_i8_flat_system_align4(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i8_flat_system_align4(
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT:    [[TMP2:%.*]] = add i8 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP3]], i8 0, i8 [[TMP2]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED1]]
+;
+  %res = atomicrmw uinc_wrap ptr %ptr, i8 %value seq_cst, align 4
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_dec_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i8_global_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = sub i8 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i8 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ugt i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP7]], i8 [[VALUE]], i8 [[TMP4]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
+;
+  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_dec_i8_global_system_align2(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i8_global_system_align2(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = sub i8 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i8 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ugt i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP7]], i8 [[VALUE]], i8 [[TMP4]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
+;
+  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i8 %value seq_cst, align 2
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_dec_i8_global_system_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i8_global_system_align4(
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT:    [[TMP2:%.*]] = sub i8 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i8 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ugt i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP5]], i8 [[VALUE]], i8 [[TMP2]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED1]]
+;
+  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i8 %value seq_cst, align 4
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_dec_i8_flat_system(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i8_flat_system(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = sub i8 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i8 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ugt i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP7]], i8 [[VALUE]], i8 [[TMP4]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
+;
+  %res = atomicrmw udec_wrap ptr %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_dec_i8_flat_system_align2(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i8_flat_system_align2(
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = sub i8 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i8 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ugt i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP7]], i8 [[VALUE]], i8 [[TMP4]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
+;
+  %res = atomicrmw udec_wrap ptr %ptr, i8 %value seq_cst, align 2
+  ret i8 %res
+}
+
+define i8 @test_atomicrmw_dec_i8_flat_system_align4(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i8_flat_system_align4(
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT:    [[TMP2:%.*]] = sub i8 [[EXTRACTED]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i8 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ugt i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP5]], i8 [[VALUE]], i8 [[TMP2]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT:    ret i8 [[EXTRACTED1]]
+;
+  %res = atomicrmw udec_wrap ptr %ptr, i8 %value seq_cst, align 4
+  ret i8 %res
+}

diff  --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
index 314038438bf831..f8fbd0f79f9e6a 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
@@ -2,8 +2,8 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s
 ; RUN: opt -mtriple=r600-mesa-mesa3d -S -atomic-expand %s | FileCheck %s
 
-define i8 @test_atomicrmw_xchg_i8_global(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_xchg_i8_global(
+define i8 @test_atomicrmw_xchg_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_xchg_i8_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -19,7 +19,7 @@ define i8 @test_atomicrmw_xchg_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = or i32 [[TMP5]], [[VALOPERAND_SHIFTED]]
-; CHECK-NEXT:    [[TMP7:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP6]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP6]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -28,12 +28,12 @@ define i8 @test_atomicrmw_xchg_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED]]
 ;
-  %res = atomicrmw xchg ptr addrspace(1) %ptr, i8 %value seq_cst
+  %res = atomicrmw xchg ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_add_i8_global(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_add_i8_global(
+define i8 @test_atomicrmw_add_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_add_i8_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -51,7 +51,7 @@ define i8 @test_atomicrmw_add_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -60,12 +60,12 @@ define i8 @test_atomicrmw_add_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED]]
 ;
-  %res = atomicrmw add ptr addrspace(1) %ptr, i8 %value seq_cst
+  %res = atomicrmw add ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_add_i8_global_align2(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_add_i8_global_align2(
+define i8 @test_atomicrmw_add_i8_global_agent_align2(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_add_i8_global_agent_align2(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -83,7 +83,7 @@ define i8 @test_atomicrmw_add_i8_global_align2(ptr addrspace(1) %ptr, i8 %value)
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -92,12 +92,12 @@ define i8 @test_atomicrmw_add_i8_global_align2(ptr addrspace(1) %ptr, i8 %value)
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED]]
 ;
-  %res = atomicrmw add ptr addrspace(1) %ptr, i8 %value seq_cst, align 2
+  %res = atomicrmw add ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 2
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_add_i8_global_align4(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_add_i8_global_align4(
+define i8 @test_atomicrmw_add_i8_global_agent_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_add_i8_global_agent_align4(
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[VALUE:%.*]] to i32
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
@@ -107,7 +107,7 @@ define i8 @test_atomicrmw_add_i8_global_align4(ptr addrspace(1) %ptr, i8 %value)
 ; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[NEW]], 255
 ; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[LOADED]], -256
 ; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[TMP5]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[TMP5]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -115,12 +115,12 @@ define i8 @test_atomicrmw_add_i8_global_align4(ptr addrspace(1) %ptr, i8 %value)
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED]]
 ;
-  %res = atomicrmw add ptr addrspace(1) %ptr, i8 %value seq_cst, align 4
+  %res = atomicrmw add ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 4
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_sub_i8_global(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_sub_i8_global(
+define i8 @test_atomicrmw_sub_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_i8_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -138,7 +138,7 @@ define i8 @test_atomicrmw_sub_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -147,12 +147,12 @@ define i8 @test_atomicrmw_sub_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED]]
 ;
-  %res = atomicrmw sub ptr addrspace(1) %ptr, i8 %value seq_cst
+  %res = atomicrmw sub ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_and_i8_global(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i8_global(
+define i8 @test_atomicrmw_and_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i8_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -168,12 +168,12 @@ define i8 @test_atomicrmw_and_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED]]
 ;
-  %res = atomicrmw and ptr addrspace(1) %ptr, i8 %value seq_cst
+  %res = atomicrmw and ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_nand_i8_global(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_nand_i8_global(
+define i8 @test_atomicrmw_nand_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_nand_i8_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -192,7 +192,7 @@ define i8 @test_atomicrmw_nand_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[NEW]], [[MASK]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = or i32 [[TMP7]], [[TMP6]]
-; CHECK-NEXT:    [[TMP9:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP8]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP8]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP9]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP9]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -201,12 +201,12 @@ define i8 @test_atomicrmw_nand_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED]]
 ;
-  %res = atomicrmw nand ptr addrspace(1) %ptr, i8 %value seq_cst
+  %res = atomicrmw nand ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_or_i8_global(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_or_i8_global(
+define i8 @test_atomicrmw_or_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_or_i8_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -221,12 +221,12 @@ define i8 @test_atomicrmw_or_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED]]
 ;
-  %res = atomicrmw or ptr addrspace(1) %ptr, i8 %value seq_cst
+  %res = atomicrmw or ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_xor_i8_global(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_xor_i8_global(
+define i8 @test_atomicrmw_xor_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_xor_i8_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -241,12 +241,12 @@ define i8 @test_atomicrmw_xor_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED]]
 ;
-  %res = atomicrmw xor ptr addrspace(1) %ptr, i8 %value seq_cst
+  %res = atomicrmw xor ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_max_i8_global(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_max_i8_global(
+define i8 @test_atomicrmw_max_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_max_i8_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -266,7 +266,7 @@ define i8 @test_atomicrmw_max_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -275,12 +275,12 @@ define i8 @test_atomicrmw_max_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
 ;
-  %res = atomicrmw max ptr addrspace(1) %ptr, i8 %value seq_cst
+  %res = atomicrmw max ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_min_i8_global(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_min_i8_global(
+define i8 @test_atomicrmw_min_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_min_i8_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -300,7 +300,7 @@ define i8 @test_atomicrmw_min_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -309,12 +309,12 @@ define i8 @test_atomicrmw_min_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
 ;
-  %res = atomicrmw min ptr addrspace(1) %ptr, i8 %value seq_cst
+  %res = atomicrmw min ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_umax_i8_global(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_umax_i8_global(
+define i8 @test_atomicrmw_umax_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_umax_i8_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -334,7 +334,7 @@ define i8 @test_atomicrmw_umax_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -343,12 +343,12 @@ define i8 @test_atomicrmw_umax_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
 ;
-  %res = atomicrmw umax ptr addrspace(1) %ptr, i8 %value seq_cst
+  %res = atomicrmw umax ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_umin_i8_global(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_umin_i8_global(
+define i8 @test_atomicrmw_umin_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_umin_i8_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -368,7 +368,7 @@ define i8 @test_atomicrmw_umin_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -377,12 +377,12 @@ define i8 @test_atomicrmw_umin_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
 ;
-  %res = atomicrmw umin ptr addrspace(1) %ptr, i8 %value seq_cst
+  %res = atomicrmw umin ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_cmpxchg_i8_global(ptr addrspace(1) %out, i8 %in, i8 %old) {
-; CHECK-LABEL: @test_cmpxchg_i8_global(
+define i8 @test_cmpxchg_i8_global_agent(ptr addrspace(1) %out, i8 %in, i8 %old) {
+; CHECK-LABEL: @test_cmpxchg_i8_global_agent(
 ; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT:%.*]], i64 4
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[GEP]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[GEP]] to i64
@@ -402,7 +402,7 @@ define i8 @test_cmpxchg_i8_global(ptr addrspace(1) %out, i8 %in, i8 %old) {
 ; CHECK-NEXT:    [[TMP9:%.*]] = phi i32 [ [[TMP8]], [[TMP0:%.*]] ], [ [[TMP15:%.*]], [[PARTWORD_CMPXCHG_FAILURE:%.*]] ]
 ; CHECK-NEXT:    [[TMP10:%.*]] = or i32 [[TMP9]], [[TMP4]]
 ; CHECK-NEXT:    [[TMP11:%.*]] = or i32 [[TMP9]], [[TMP6]]
-; CHECK-NEXT:    [[TMP12:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[TMP11]], i32 [[TMP10]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[TMP11]], i32 [[TMP10]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[TMP13:%.*]] = extractvalue { i32, i1 } [[TMP12]], 0
 ; CHECK-NEXT:    [[TMP14:%.*]] = extractvalue { i32, i1 } [[TMP12]], 1
 ; CHECK-NEXT:    br i1 [[TMP14]], label [[PARTWORD_CMPXCHG_END:%.*]], label [[PARTWORD_CMPXCHG_FAILURE]]
@@ -419,7 +419,7 @@ define i8 @test_cmpxchg_i8_global(ptr addrspace(1) %out, i8 %in, i8 %old) {
 ; CHECK-NEXT:    ret i8 [[EXTRACT]]
 ;
   %gep = getelementptr i8, ptr addrspace(1) %out, i64 4
-  %res = cmpxchg ptr addrspace(1) %gep, i8 %old, i8 %in seq_cst seq_cst
+  %res = cmpxchg ptr addrspace(1) %gep, i8 %old, i8 %in syncscope("agent") seq_cst seq_cst
   %extract = extractvalue {i8, i1} %res, 0
   ret i8 %extract
 }
@@ -467,8 +467,8 @@ define i8 @test_cmpxchg_i8_local_align2(ptr addrspace(3) %out, i8 %in, i8 %old)
   ret i8 %extract
 }
 
-define i8 @test_atomicrmw_inc_i8_global(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_inc_i8_global(
+define i8 @test_atomicrmw_inc_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i8_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -489,7 +489,7 @@ define i8 @test_atomicrmw_inc_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -498,12 +498,12 @@ define i8 @test_atomicrmw_inc_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
 ;
-  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i8 %value seq_cst
+  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_inc_i8_global_align2(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_inc_i8_global_align2(
+define i8 @test_atomicrmw_inc_i8_global_agent_align2(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i8_global_agent_align2(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -524,7 +524,7 @@ define i8 @test_atomicrmw_inc_i8_global_align2(ptr addrspace(1) %ptr, i8 %value)
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -533,12 +533,12 @@ define i8 @test_atomicrmw_inc_i8_global_align2(ptr addrspace(1) %ptr, i8 %value)
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
 ;
-  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i8 %value seq_cst, align 2
+  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 2
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_inc_i8_global_align4(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_inc_i8_global_align4(
+define i8 @test_atomicrmw_inc_i8_global_agent_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i8_global_agent_align4(
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
 ; CHECK:       atomicrmw.start:
@@ -550,7 +550,7 @@ define i8 @test_atomicrmw_inc_i8_global_align4(ptr addrspace(1) %ptr, i8 %value)
 ; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
-; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -558,7 +558,7 @@ define i8 @test_atomicrmw_inc_i8_global_align4(ptr addrspace(1) %ptr, i8 %value)
 ; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED1]]
 ;
-  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i8 %value seq_cst, align 4
+  %res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 4
   ret i8 %res
 }
 
@@ -657,8 +657,8 @@ define i8 @test_atomicrmw_inc_i8_local_align4(ptr addrspace(3) %ptr, i8 %value)
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_inc_i8_flat(ptr %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_inc_i8_flat(
+define i8 @test_atomicrmw_inc_i8_flat_agent(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i8_flat_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -679,7 +679,7 @@ define i8 @test_atomicrmw_inc_i8_flat(ptr %ptr, i8 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -688,12 +688,12 @@ define i8 @test_atomicrmw_inc_i8_flat(ptr %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
 ;
-  %res = atomicrmw uinc_wrap ptr %ptr, i8 %value seq_cst
+  %res = atomicrmw uinc_wrap ptr %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_inc_i8_flat_align2(ptr %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_inc_i8_flat_align2(
+define i8 @test_atomicrmw_inc_i8_flat_agent_align2(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i8_flat_agent_align2(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -714,7 +714,7 @@ define i8 @test_atomicrmw_inc_i8_flat_align2(ptr %ptr, i8 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -723,12 +723,12 @@ define i8 @test_atomicrmw_inc_i8_flat_align2(ptr %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
 ;
-  %res = atomicrmw uinc_wrap ptr %ptr, i8 %value seq_cst, align 2
+  %res = atomicrmw uinc_wrap ptr %ptr, i8 %value syncscope("agent") seq_cst, align 2
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_inc_i8_flat_align4(ptr %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_inc_i8_flat_align4(
+define i8 @test_atomicrmw_inc_i8_flat_agent_align4(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_inc_i8_flat_agent_align4(
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
 ; CHECK:       atomicrmw.start:
@@ -740,7 +740,7 @@ define i8 @test_atomicrmw_inc_i8_flat_align4(ptr %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
-; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -748,12 +748,12 @@ define i8 @test_atomicrmw_inc_i8_flat_align4(ptr %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED1]]
 ;
-  %res = atomicrmw uinc_wrap ptr %ptr, i8 %value seq_cst, align 4
+  %res = atomicrmw uinc_wrap ptr %ptr, i8 %value syncscope("agent") seq_cst, align 4
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_dec_i8_global(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_dec_i8_global(
+define i8 @test_atomicrmw_dec_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i8_global_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -776,7 +776,7 @@ define i8 @test_atomicrmw_dec_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -785,12 +785,12 @@ define i8 @test_atomicrmw_dec_i8_global(ptr addrspace(1) %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
 ;
-  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i8 %value seq_cst
+  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_dec_i8_global_align2(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_dec_i8_global_align2(
+define i8 @test_atomicrmw_dec_i8_global_agent_align2(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i8_global_agent_align2(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -813,7 +813,7 @@ define i8 @test_atomicrmw_dec_i8_global_align2(ptr addrspace(1) %ptr, i8 %value)
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -822,12 +822,12 @@ define i8 @test_atomicrmw_dec_i8_global_align2(ptr addrspace(1) %ptr, i8 %value)
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
 ;
-  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i8 %value seq_cst, align 2
+  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 2
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_dec_i8_global_align4(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_dec_i8_global_align4(
+define i8 @test_atomicrmw_dec_i8_global_agent_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i8_global_agent_align4(
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
 ; CHECK:       atomicrmw.start:
@@ -841,7 +841,7 @@ define i8 @test_atomicrmw_dec_i8_global_align4(ptr addrspace(1) %ptr, i8 %value)
 ; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
-; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -849,7 +849,7 @@ define i8 @test_atomicrmw_dec_i8_global_align4(ptr addrspace(1) %ptr, i8 %value)
 ; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED1]]
 ;
-  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i8 %value seq_cst, align 4
+  %res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 4
   ret i8 %res
 }
 
@@ -954,8 +954,8 @@ define i8 @test_atomicrmw_dec_i8_local_align4(ptr addrspace(3) %ptr, i8 %value)
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_dec_i8_flat(ptr %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_dec_i8_flat(
+define i8 @test_atomicrmw_dec_i8_flat_agent(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i8_flat_agent(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -978,7 +978,7 @@ define i8 @test_atomicrmw_dec_i8_flat(ptr %ptr, i8 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -987,12 +987,12 @@ define i8 @test_atomicrmw_dec_i8_flat(ptr %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
 ;
-  %res = atomicrmw udec_wrap ptr %ptr, i8 %value seq_cst
+  %res = atomicrmw udec_wrap ptr %ptr, i8 %value syncscope("agent") seq_cst
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_dec_i8_flat_align2(ptr %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_dec_i8_flat_align2(
+define i8 @test_atomicrmw_dec_i8_flat_agent_align2(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i8_flat_agent_align2(
 ; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
 ; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
 ; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -1015,7 +1015,7 @@ define i8 @test_atomicrmw_dec_i8_flat_align2(ptr %ptr, i8 %value) {
 ; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -1024,12 +1024,12 @@ define i8 @test_atomicrmw_dec_i8_flat_align2(ptr %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED3]]
 ;
-  %res = atomicrmw udec_wrap ptr %ptr, i8 %value seq_cst, align 2
+  %res = atomicrmw udec_wrap ptr %ptr, i8 %value syncscope("agent") seq_cst, align 2
   ret i8 %res
 }
 
-define i8 @test_atomicrmw_dec_i8_flat_align4(ptr %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_dec_i8_flat_align4(
+define i8 @test_atomicrmw_dec_i8_flat_agent_align4(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_dec_i8_flat_agent_align4(
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
 ; CHECK:       atomicrmw.start:
@@ -1043,7 +1043,7 @@ define i8 @test_atomicrmw_dec_i8_flat_align4(ptr %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
 ; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
 ; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
-; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -1051,6 +1051,6 @@ define i8 @test_atomicrmw_dec_i8_flat_align4(ptr %ptr, i8 %value) {
 ; CHECK-NEXT:    [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
 ; CHECK-NEXT:    ret i8 [[EXTRACTED1]]
 ;
-  %res = atomicrmw udec_wrap ptr %ptr, i8 %value seq_cst, align 4
+  %res = atomicrmw udec_wrap ptr %ptr, i8 %value syncscope("agent") seq_cst, align 4
   ret i8 %res
 }
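
For readers skimming the renames above: the only IR-level change in these tests is the explicit synchronization scope on the atomic operation itself. A minimal sketch of the two forms, not taken from the test file (the function name is illustrative):

define i8 @scope_sketch(ptr addrspace(1) %ptr, i8 %value) {
  ; Default (system) scope: ordered with everything that can access the memory.
  %sys = atomicrmw add ptr addrspace(1) %ptr, i8 %value seq_cst
  ; Agent scope: ordering is only guaranteed within a single GPU device.
  %agent = atomicrmw add ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
  ret i8 %agent
}

Both forms go through the same part-word expansion shown in the checks; the syncscope simply propagates to the emitted cmpxchg, which is what the updated CHECK lines verify.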

diff  --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
index 40e2af57c7e2f2..5fa9dcc4ad9bf0 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll
@@ -10,7 +10,7 @@ define i32 @test_atomicrmw_nand_i32_flat(ptr %ptr, i32 %value) {
 ; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[LOADED]], [[VALUE:%.*]]
 ; CHECK-NEXT:    [[NEW:%.*]] = xor i32 [[TMP2]], -1
-; CHECK-NEXT:    [[TMP3:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst
+; CHECK-NEXT:    [[TMP3:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -29,7 +29,7 @@ define i32 @test_atomicrmw_nand_i32_global(ptr addrspace(1) %ptr, i32 %value) {
 ; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[LOADED]], [[VALUE:%.*]]
 ; CHECK-NEXT:    [[NEW:%.*]] = xor i32 [[TMP2]], -1
-; CHECK-NEXT:    [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst
+; CHECK-NEXT:    [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -48,7 +48,7 @@ define i32 @test_atomicrmw_nand_i32_local(ptr addrspace(3) %ptr, i32 %value) {
 ; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[LOADED]], [[VALUE:%.*]]
 ; CHECK-NEXT:    [[NEW:%.*]] = xor i32 [[TMP2]], -1
-; CHECK-NEXT:    [[TMP3:%.*]] = cmpxchg ptr addrspace(3) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst
+; CHECK-NEXT:    [[TMP3:%.*]] = cmpxchg ptr addrspace(3) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
 ; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
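
The nand file picks up regenerated check lines only: atomicrmw nand has no native AMDGPU instruction, so AtomicExpand already lowers it to the compare-exchange loop the checks describe, and the refreshed output now spells out the align 4 on the cmpxchg. A condensed, hand-written version of that loop for the global case (illustrative names, following the CHECK lines above):

define i32 @nand_cas_loop_sketch(ptr addrspace(1) %ptr, i32 %value) {
entry:
  %init = load i32, ptr addrspace(1) %ptr, align 4
  br label %atomicrmw.start

atomicrmw.start:
  ; Retry until the word is unchanged between the load and the cmpxchg.
  %loaded = phi i32 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
  %tmp = and i32 %loaded, %value
  %new = xor i32 %tmp, -1                     ; nand = ~(loaded & value)
  %pair = cmpxchg ptr addrspace(1) %ptr, i32 %loaded, i32 %new seq_cst seq_cst, align 4
  %success = extractvalue { i32, i1 } %pair, 1
  %newloaded = extractvalue { i32, i1 } %pair, 0
  br i1 %success, label %atomicrmw.end, label %atomicrmw.start

atomicrmw.end:
  ret i32 %newloaded          ; the value observed before the successful exchange
}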