[clang] 86842e1 - [AMDGPU] New clang option for emitting a waitcnt instruction after each memory instruction (#79236)

via cfe-commits <cfe-commits@lists.llvm.org>
Wed Apr 10 10:47:10 PDT 2024


Author: Jun Wang
Date: 2024-04-10T10:47:04-07:00
New Revision: 86842e1f724fba5abae50ce438553895e69b8141

URL: https://github.com/llvm/llvm-project/commit/86842e1f724fba5abae50ce438553895e69b8141
DIFF: https://github.com/llvm/llvm-project/commit/86842e1f724fba5abae50ce438553895e69b8141.diff

LOG: [AMDGPU] New clang option for emitting a waitcnt instruction after each memory instruction (#79236)

This patch introduces a new command-line option for clang,
-mamdgpu-precise-memory-op (the precise-memory target feature in the
backend). When this option is specified, a waitcnt instruction is
generated after each memory load/store instruction. The counter values
are always 0, but which counters are involved depends on the memory
instruction.
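
For illustration, a minimal hand-written sketch of how the feature is
exercised (the function name and CHECK lines here are illustrative, not
autogenerated; they mirror the GFX9 pattern in the test added by this
patch). In the driver, -mamdgpu-precise-memory-op is translated to the
+precise-memory target feature, which can also be passed to llc directly
via -mattr:

; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -mattr=+precise-memory < %s | FileCheck %s
; Sketch only: with precise-memory enabled, SIInsertWaitcnts places a zeroed
; waitcnt immediately after the store; on gfx900 only vmcnt remains after
; simplification, matching the GFX9 output in the autogenerated test below.
; CHECK: global_store_dword
; CHECK-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @precise_store_example(ptr addrspace(1) %out, i32 %v) {
  store i32 %v, ptr addrspace(1) %out
  ret void
}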

---------

Co-authored-by: Jun Wang <jun.wang7@amd.com>

Added: 
    llvm/test/CodeGen/AMDGPU/insert_waitcnt_for_precise_memory.ll

Modified: 
    clang/include/clang/Driver/Options.td
    clang/lib/Driver/ToolChains/AMDGPU.cpp
    clang/test/Driver/amdgpu-features.c
    llvm/lib/Target/AMDGPU/AMDGPU.td
    llvm/lib/Target/AMDGPU/GCNSubtarget.h
    llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 0a74e6c75f95bb..7ac36222644aac 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -4912,6 +4912,9 @@ defm tgsplit : SimpleMFlag<"tgsplit", "Enable", "Disable",
 defm wavefrontsize64 : SimpleMFlag<"wavefrontsize64",
   "Specify wavefront size 64", "Specify wavefront size 32",
   " mode (AMDGPU only)">;
+defm amdgpu_precise_memory_op
+    : SimpleMFlag<"amdgpu-precise-memory-op", "Enable", "Disable",
+                  " precise memory mode (AMDGPU only)">;
 
 defm unsafe_fp_atomics : BoolMOption<"unsafe-fp-atomics",
   TargetOpts<"AllowAMDGPUUnsafeFPAtomics">, DefaultFalse,

diff --git a/clang/lib/Driver/ToolChains/AMDGPU.cpp b/clang/lib/Driver/ToolChains/AMDGPU.cpp
index e122379e860e20..4e6362a0f40632 100644
--- a/clang/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/clang/lib/Driver/ToolChains/AMDGPU.cpp
@@ -670,6 +670,10 @@ void amdgpu::getAMDGPUTargetFeatures(const Driver &D,
                    options::OPT_mno_wavefrontsize64, false))
     Features.push_back("+wavefrontsize64");
 
+  if (Args.hasFlag(options::OPT_mamdgpu_precise_memory_op,
+                   options::OPT_mno_amdgpu_precise_memory_op, false))
+    Features.push_back("+precise-memory");
+
   handleTargetFeaturesGroup(D, Triple, Args, Features,
                             options::OPT_m_amdgpu_Features_Group);
 }

diff --git a/clang/test/Driver/amdgpu-features.c b/clang/test/Driver/amdgpu-features.c
index a516bc6b7ff200..864744db203e91 100644
--- a/clang/test/Driver/amdgpu-features.c
+++ b/clang/test/Driver/amdgpu-features.c
@@ -32,3 +32,9 @@
 
 // RUN: %clang -### -target amdgcn -mcpu=gfx1010 -mno-cumode %s 2>&1 | FileCheck --check-prefix=NO-CUMODE %s
 // NO-CUMODE: "-target-feature" "-cumode"
+
+// RUN: %clang -### -target amdgcn -mcpu=gfx1010 -mamdgpu-precise-memory-op %s 2>&1 | FileCheck --check-prefix=PREC-MEM %s
+// PREC-MEM: "-target-feature" "+precise-memory"
+
+// RUN: %clang -### -target amdgcn -mcpu=gfx1010 -mno-amdgpu-precise-memory-op %s 2>&1 | FileCheck --check-prefix=NO-PREC-MEM %s
+// NO-PREC-MEM-NOT: {{".*precise-memory"}}

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 37dcfef3b2a3da..9b09550159993c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -168,6 +168,10 @@ def FeatureCuMode : SubtargetFeature<"cumode",
   "Enable CU wavefront execution mode"
 >;
 
+def FeaturePreciseMemory
+    : SubtargetFeature<"precise-memory", "EnablePreciseMemory",
+                       "true", "Enable precise memory mode">;
+
 def FeatureSGPRInitBug : SubtargetFeature<"sgpr-init-bug",
   "SGPRInitBug",
   "true",

diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index e24a18a2842f62..04ff53a6647bd5 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -87,6 +87,7 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
   bool EnableTgSplit = false;
   bool EnableCuMode = false;
   bool TrapHandler = false;
+  bool EnablePreciseMemory = false;
 
   // Used as options.
   bool EnableLoadStoreOpt = false;
@@ -599,6 +600,8 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
     return EnableCuMode;
   }
 
+  bool isPreciseMemoryEnabled() const { return EnablePreciseMemory; }
+
   bool hasFlatAddressSpace() const {
     return FlatAddressSpace;
   }

diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index bb499c5c8c578e..556ec3e231ff19 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -2305,6 +2305,14 @@ bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
     }
 #endif
 
+    if (ST->isPreciseMemoryEnabled() && Inst.mayLoadOrStore()) {
+      AMDGPU::Waitcnt Wait = WCG->getAllZeroWaitcnt(
+          Inst.mayStore() && !SIInstrInfo::isAtomicRet(Inst));
+      ScoreBrackets.simplifyWaitcnt(Wait);
+      Modified |= generateWaitcnt(Wait, std::next(Inst.getIterator()), Block,
+                                  ScoreBrackets, /*OldWaitcntInstr=*/nullptr);
+    }
+
     LLVM_DEBUG({
       Inst.print(dbgs());
       ScoreBrackets.dump();

diff --git a/llvm/test/CodeGen/AMDGPU/insert_waitcnt_for_precise_memory.ll b/llvm/test/CodeGen/AMDGPU/insert_waitcnt_for_precise_memory.ll
new file mode 100644
index 00000000000000..df03e893703777
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/insert_waitcnt_for_precise_memory.ll
@@ -0,0 +1,1658 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -mattr=+precise-memory < %s | FileCheck %s -check-prefixes=GFX9
+; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -mattr=+precise-memory < %s | FileCheck %s -check-prefixes=GFX90A
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -mattr=+precise-memory < %s | FileCheck %s -check-prefixes=GFX10
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -mattr=+enable-flat-scratch,+precise-memory < %s | FileCheck --check-prefixes=GFX9-FLATSCR %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+precise-memory < %s | FileCheck %s -check-prefixes=GFX11
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+precise-memory < %s | FileCheck %s -check-prefixes=GFX12
+
+; from atomicrmw-expand.ll
+; covers flat_load, flat_atomic (atomic with return)
+;
+define void @syncscope_workgroup_nortn(ptr %addr, float %val) {
+; GFX9-LABEL: syncscope_workgroup_nortn:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    flat_load_dword v4, v[0:1]
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB0_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_add_f32_e32 v3, v4, v2
+; GFX9-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB0_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: syncscope_workgroup_nortn:
+; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT:    flat_load_dword v5, v[0:1]
+; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT:    s_mov_b64 s[4:5], 0
+; GFX90A-NEXT:  .LBB0_1: ; %atomicrmw.start
+; GFX90A-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT:    v_add_f32_e32 v4, v5, v2
+; GFX90A-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[4:5] glc
+; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v5
+; GFX90A-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT:    v_mov_b32_e32 v5, v3
+; GFX90A-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT:    s_cbranch_execnz .LBB0_1
+; GFX90A-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: syncscope_workgroup_nortn:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    flat_load_dword v4, v[0:1]
+; GFX10-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_mov_b32 s4, 0
+; GFX10-NEXT:  .LBB0_1: ; %atomicrmw.start
+; GFX10-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT:    v_add_f32_e32 v3, v4, v2
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GFX10-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    buffer_gl0_inv
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX10-NEXT:    v_mov_b32_e32 v4, v3
+; GFX10-NEXT:    s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT:    s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT:    s_cbranch_execnz .LBB0_1
+; GFX10-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT:    s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-FLATSCR-LABEL: syncscope_workgroup_nortn:
+; GFX9-FLATSCR:       ; %bb.0:
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    flat_load_dword v4, v[0:1]
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    s_mov_b64 s[0:1], 0
+; GFX9-FLATSCR-NEXT:  .LBB0_1: ; %atomicrmw.start
+; GFX9-FLATSCR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-FLATSCR-NEXT:    v_add_f32_e32 v3, v4, v2
+; GFX9-FLATSCR-NEXT:    flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-FLATSCR-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v4, v3
+; GFX9-FLATSCR-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; GFX9-FLATSCR-NEXT:    s_cbranch_execnz .LBB0_1
+; GFX9-FLATSCR-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-FLATSCR-NEXT:    s_or_b64 exec, exec, s[0:1]
+; GFX9-FLATSCR-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: syncscope_workgroup_nortn:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    flat_load_b32 v4, v[0:1]
+; GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    s_mov_b32 s0, 0
+; GFX11-NEXT:  .LBB0_1: ; %atomicrmw.start
+; GFX11-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_add_f32_e32 v3, v4, v2
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    buffer_gl0_inv
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-NEXT:    v_mov_b32_e32 v4, v3
+; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT:    s_cbranch_execnz .LBB0_1
+; GFX11-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT:    s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: syncscope_workgroup_nortn:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    flat_load_b32 v4, v[0:1]
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_mov_b32 s0, 0
+; GFX12-NEXT:  .LBB0_1: ; %atomicrmw.start
+; GFX12-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT:    v_add_f32_e32 v3, v4, v2
+; GFX12-NEXT:    s_wait_storecnt 0x0
+; GFX12-NEXT:    flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    global_inv scope:SCOPE_SE
+; GFX12-NEXT:    v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-NEXT:    v_mov_b32_e32 v4, v3
+; GFX12-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT:    s_cbranch_execnz .LBB0_1
+; GFX12-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT:    s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %res = atomicrmw fadd ptr %addr, float %val syncscope("workgroup") seq_cst
+  ret void
+}
+
+; from atomicrmw-nand.ll
+; covers global_atomic (atomic with return), global_load
+;
+define i32 @atomic_nand_i32_global(ptr addrspace(1) %ptr) nounwind {
+; GFX9-LABEL: atomic_nand_i32_global:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v2, v[0:1], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB1_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_mov_b32_e32 v3, v2
+; GFX9-NEXT:    v_not_b32_e32 v2, v3
+; GFX9-NEXT:    v_or_b32_e32 v2, -5, v2
+; GFX9-NEXT:    global_atomic_cmpswap v2, v[0:1], v[2:3], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB1_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v2
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: atomic_nand_i32_global:
+; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT:    global_load_dword v2, v[0:1], off
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    s_mov_b64 s[4:5], 0
+; GFX90A-NEXT:  .LBB1_1: ; %atomicrmw.start
+; GFX90A-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT:    v_mov_b32_e32 v3, v2
+; GFX90A-NEXT:    v_not_b32_e32 v2, v3
+; GFX90A-NEXT:    v_or_b32_e32 v2, -5, v2
+; GFX90A-NEXT:    buffer_wbl2
+; GFX90A-NEXT:    global_atomic_cmpswap v2, v[0:1], v[2:3], off glc
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    buffer_invl2
+; GFX90A-NEXT:    buffer_wbinvl1_vol
+; GFX90A-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX90A-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX90A-NEXT:    s_cbranch_execnz .LBB1_1
+; GFX90A-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX90A-NEXT:    v_mov_b32_e32 v0, v2
+; GFX90A-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: atomic_nand_i32_global:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    global_load_dword v2, v[0:1], off
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    s_mov_b32 s4, 0
+; GFX10-NEXT:  .LBB1_1: ; %atomicrmw.start
+; GFX10-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT:    v_mov_b32_e32 v3, v2
+; GFX10-NEXT:    v_not_b32_e32 v2, v3
+; GFX10-NEXT:    v_or_b32_e32 v2, -5, v2
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    global_atomic_cmpswap v2, v[0:1], v[2:3], off glc
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    buffer_gl1_inv
+; GFX10-NEXT:    buffer_gl0_inv
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX10-NEXT:    s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT:    s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT:    s_cbranch_execnz .LBB1_1
+; GFX10-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT:    s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT:    v_mov_b32_e32 v0, v2
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-FLATSCR-LABEL: atomic_nand_i32_global:
+; GFX9-FLATSCR:       ; %bb.0:
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    global_load_dword v2, v[0:1], off
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT:    s_mov_b64 s[0:1], 0
+; GFX9-FLATSCR-NEXT:  .LBB1_1: ; %atomicrmw.start
+; GFX9-FLATSCR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v3, v2
+; GFX9-FLATSCR-NEXT:    v_not_b32_e32 v2, v3
+; GFX9-FLATSCR-NEXT:    v_or_b32_e32 v2, -5, v2
+; GFX9-FLATSCR-NEXT:    global_atomic_cmpswap v2, v[0:1], v[2:3], off glc
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT:    buffer_wbinvl1_vol
+; GFX9-FLATSCR-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX9-FLATSCR-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-FLATSCR-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; GFX9-FLATSCR-NEXT:    s_cbranch_execnz .LBB1_1
+; GFX9-FLATSCR-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-FLATSCR-NEXT:    s_or_b64 exec, exec, s[0:1]
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v0, v2
+; GFX9-FLATSCR-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: atomic_nand_i32_global:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    global_load_b32 v2, v[0:1], off
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    s_mov_b32 s0, 0
+; GFX11-NEXT:  .LBB1_1: ; %atomicrmw.start
+; GFX11-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT:    v_mov_b32_e32 v3, v2
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_not_b32_e32 v2, v3
+; GFX11-NEXT:    v_or_b32_e32 v2, -5, v2
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    buffer_gl1_inv
+; GFX11-NEXT:    buffer_gl0_inv
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT:    s_cbranch_execnz .LBB1_1
+; GFX11-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT:    s_or_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT:    v_mov_b32_e32 v0, v2
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: atomic_nand_i32_global:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    global_load_b32 v2, v[0:1], off
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    s_mov_b32 s0, 0
+; GFX12-NEXT:  .LBB1_1: ; %atomicrmw.start
+; GFX12-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT:    v_mov_b32_e32 v3, v2
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT:    v_not_b32_e32 v2, v3
+; GFX12-NEXT:    v_or_b32_e32 v2, -5, v2
+; GFX12-NEXT:    s_wait_storecnt 0x0
+; GFX12-NEXT:    global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    global_inv scope:SCOPE_SYS
+; GFX12-NEXT:    v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT:    s_cbranch_execnz .LBB1_1
+; GFX12-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX12-NEXT:    s_or_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT:    v_mov_b32_e32 v0, v2
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw nand ptr addrspace(1) %ptr, i32 4 seq_cst
+  ret i32 %result
+}
+
+; from call-argument-types.ll
+; covers scratch_load, scratch_store, buffer_load, buffer_store
+;
+declare hidden void @byval_align16_f64_arg(<32 x i32>, ptr addrspace(5) byval(double) align 16)
+define void @tail_call_byval_align16(<32 x i32> %val, double %tmp)  {
+; GFX9-LABEL: tail_call_byval_align16:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:28
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_load_dword v33, off, s[0:3], s32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_getpc_b64 s[16:17]
+; GFX9-NEXT:    s_add_u32 s16, s16, byval_align16_f64_arg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s17, s17, byval_align16_f64_arg@rel32@hi+12
+; GFX9-NEXT:    buffer_store_dword v32, off, s[0:3], s32 offset:20
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:24
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_store_dword v32, off, s[0:3], s32 offset:16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    buffer_store_dword v33, off, s[0:3], s32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[16:17]
+;
+; GFX90A-LABEL: tail_call_byval_align16:
+; GFX90A:       ; %bb.0: ; %entry
+; GFX90A-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:28
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:24
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    buffer_load_dword v34, off, s[0:3], s32
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    s_getpc_b64 s[16:17]
+; GFX90A-NEXT:    s_add_u32 s16, s16, byval_align16_f64_arg@rel32@lo+4
+; GFX90A-NEXT:    s_addc_u32 s17, s17, byval_align16_f64_arg@rel32@hi+12
+; GFX90A-NEXT:    buffer_store_dword v32, off, s[0:3], s32 offset:20
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    buffer_store_dword v33, off, s[0:3], s32 offset:16
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    buffer_store_dword v34, off, s[0:3], s32
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    s_setpc_b64 s[16:17]
+;
+; GFX10-LABEL: tail_call_byval_align16:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:28
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:24
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    buffer_load_dword v34, off, s[0:3], s32
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    s_getpc_b64 s[16:17]
+; GFX10-NEXT:    s_add_u32 s16, s16, byval_align16_f64_arg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s17, s17, byval_align16_f64_arg@rel32@hi+12
+; GFX10-NEXT:    buffer_store_dword v32, off, s[0:3], s32 offset:20
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    buffer_store_dword v33, off, s[0:3], s32 offset:16
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    buffer_store_dword v34, off, s[0:3], s32
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    s_setpc_b64 s[16:17]
+;
+; GFX9-FLATSCR-LABEL: tail_call_byval_align16:
+; GFX9-FLATSCR:       ; %bb.0: ; %entry
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    scratch_load_dword v32, off, s32
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT:    s_getpc_b64 s[0:1]
+; GFX9-FLATSCR-NEXT:    s_add_u32 s0, s0, byval_align16_f64_arg@rel32@lo+4
+; GFX9-FLATSCR-NEXT:    s_addc_u32 s1, s1, byval_align16_f64_arg@rel32@hi+12
+; GFX9-FLATSCR-NEXT:    scratch_store_dword off, v32, s32
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT:    scratch_load_dwordx2 v[32:33], off, s32 offset:24
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT:    scratch_store_dwordx2 off, v[32:33], s32 offset:16
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT:    s_setpc_b64 s[0:1]
+;
+; GFX11-LABEL: tail_call_byval_align16:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    scratch_load_b32 v32, off, s32
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    s_getpc_b64 s[0:1]
+; GFX11-NEXT:    s_add_u32 s0, s0, byval_align16_f64_arg@rel32@lo+4
+; GFX11-NEXT:    s_addc_u32 s1, s1, byval_align16_f64_arg@rel32@hi+12
+; GFX11-NEXT:    scratch_store_b32 off, v32, s32
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    scratch_load_b64 v[32:33], off, s32 offset:24
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    scratch_store_b64 off, v[32:33], s32 offset:16
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    s_setpc_b64 s[0:1]
+;
+; GFX12-LABEL: tail_call_byval_align16:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    scratch_load_b32 v32, off, s32
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    s_getpc_b64 s[0:1]
+; GFX12-NEXT:    s_sext_i32_i16 s1, s1
+; GFX12-NEXT:    s_add_co_u32 s0, s0, byval_align16_f64_arg@rel32@lo+8
+; GFX12-NEXT:    s_add_co_ci_u32 s1, s1, byval_align16_f64_arg@rel32@hi+16
+; GFX12-NEXT:    scratch_store_b32 off, v32, s32
+; GFX12-NEXT:    s_wait_storecnt 0x0
+; GFX12-NEXT:    scratch_load_b64 v[32:33], off, s32 offset:24
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    scratch_store_b64 off, v[32:33], s32 offset:16
+; GFX12-NEXT:    s_wait_storecnt 0x0
+; GFX12-NEXT:    s_setpc_b64 s[0:1]
+entry:
+  %alloca = alloca double, align 8, addrspace(5)
+  tail call void @byval_align16_f64_arg(<32 x i32> %val, ptr addrspace(5) byval(double) align 16 %alloca)
+  ret void
+}
+
+; from udiv.ll
+; covers s_load
+;
+define amdgpu_kernel void @udiv_i32(ptr addrspace(1) %out, i32 %x, i32 %y) {
+; GFX9-LABEL: udiv_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
+; GFX9-NEXT:    s_sub_i32 s4, 0, s3
+; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GFX9-NEXT:    v_readfirstlane_b32 s5, v0
+; GFX9-NEXT:    s_mul_i32 s4, s4, s5
+; GFX9-NEXT:    s_mul_hi_u32 s4, s5, s4
+; GFX9-NEXT:    s_add_i32 s5, s5, s4
+; GFX9-NEXT:    s_mul_hi_u32 s4, s2, s5
+; GFX9-NEXT:    s_mul_i32 s5, s4, s3
+; GFX9-NEXT:    s_sub_i32 s2, s2, s5
+; GFX9-NEXT:    s_add_i32 s6, s4, 1
+; GFX9-NEXT:    s_sub_i32 s5, s2, s3
+; GFX9-NEXT:    s_cmp_ge_u32 s2, s3
+; GFX9-NEXT:    s_cselect_b32 s4, s6, s4
+; GFX9-NEXT:    s_cselect_b32 s2, s5, s2
+; GFX9-NEXT:    s_add_i32 s5, s4, 1
+; GFX9-NEXT:    s_cmp_ge_u32 s2, s3
+; GFX9-NEXT:    s_cselect_b32 s2, s5, s4
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_endpgm
+;
+; GFX90A-LABEL: udiv_i32:
+; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    v_mov_b32_e32 v1, 0
+; GFX90A-NEXT:    v_cvt_f32_u32_e32 v0, s3
+; GFX90A-NEXT:    s_sub_i32 s4, 0, s3
+; GFX90A-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GFX90A-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GFX90A-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GFX90A-NEXT:    v_readfirstlane_b32 s5, v0
+; GFX90A-NEXT:    s_mul_i32 s4, s4, s5
+; GFX90A-NEXT:    s_mul_hi_u32 s4, s5, s4
+; GFX90A-NEXT:    s_add_i32 s5, s5, s4
+; GFX90A-NEXT:    s_mul_hi_u32 s4, s2, s5
+; GFX90A-NEXT:    s_mul_i32 s5, s4, s3
+; GFX90A-NEXT:    s_sub_i32 s2, s2, s5
+; GFX90A-NEXT:    s_add_i32 s6, s4, 1
+; GFX90A-NEXT:    s_sub_i32 s5, s2, s3
+; GFX90A-NEXT:    s_cmp_ge_u32 s2, s3
+; GFX90A-NEXT:    s_cselect_b32 s4, s6, s4
+; GFX90A-NEXT:    s_cselect_b32 s2, s5, s2
+; GFX90A-NEXT:    s_add_i32 s5, s4, 1
+; GFX90A-NEXT:    s_cmp_ge_u32 s2, s3
+; GFX90A-NEXT:    s_cselect_b32 s2, s5, s4
+; GFX90A-NEXT:    v_mov_b32_e32 v0, s2
+; GFX90A-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX10-LABEL: udiv_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    v_cvt_f32_u32_e32 v0, s3
+; GFX10-NEXT:    s_sub_i32 s5, 0, s3
+; GFX10-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GFX10-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GFX10-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX10-NEXT:    v_mov_b32_e32 v0, 0
+; GFX10-NEXT:    s_mul_i32 s5, s5, s4
+; GFX10-NEXT:    s_mul_hi_u32 s5, s4, s5
+; GFX10-NEXT:    s_add_i32 s4, s4, s5
+; GFX10-NEXT:    s_mul_hi_u32 s4, s2, s4
+; GFX10-NEXT:    s_mul_i32 s5, s4, s3
+; GFX10-NEXT:    s_sub_i32 s2, s2, s5
+; GFX10-NEXT:    s_add_i32 s5, s4, 1
+; GFX10-NEXT:    s_sub_i32 s6, s2, s3
+; GFX10-NEXT:    s_cmp_ge_u32 s2, s3
+; GFX10-NEXT:    s_cselect_b32 s4, s5, s4
+; GFX10-NEXT:    s_cselect_b32 s2, s6, s2
+; GFX10-NEXT:    s_add_i32 s5, s4, 1
+; GFX10-NEXT:    s_cmp_ge_u32 s2, s3
+; GFX10-NEXT:    s_cselect_b32 s2, s5, s4
+; GFX10-NEXT:    v_mov_b32_e32 v1, s2
+; GFX10-NEXT:    global_store_dword v0, v1, s[0:1]
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    s_endpgm
+;
+; GFX9-FLATSCR-LABEL: udiv_i32:
+; GFX9-FLATSCR:       ; %bb.0:
+; GFX9-FLATSCR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-FLATSCR-NEXT:    v_cvt_f32_u32_e32 v0, s3
+; GFX9-FLATSCR-NEXT:    s_sub_i32 s4, 0, s3
+; GFX9-FLATSCR-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GFX9-FLATSCR-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GFX9-FLATSCR-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GFX9-FLATSCR-NEXT:    v_readfirstlane_b32 s5, v0
+; GFX9-FLATSCR-NEXT:    s_mul_i32 s4, s4, s5
+; GFX9-FLATSCR-NEXT:    s_mul_hi_u32 s4, s5, s4
+; GFX9-FLATSCR-NEXT:    s_add_i32 s5, s5, s4
+; GFX9-FLATSCR-NEXT:    s_mul_hi_u32 s4, s2, s5
+; GFX9-FLATSCR-NEXT:    s_mul_i32 s5, s4, s3
+; GFX9-FLATSCR-NEXT:    s_sub_i32 s2, s2, s5
+; GFX9-FLATSCR-NEXT:    s_add_i32 s6, s4, 1
+; GFX9-FLATSCR-NEXT:    s_sub_i32 s5, s2, s3
+; GFX9-FLATSCR-NEXT:    s_cmp_ge_u32 s2, s3
+; GFX9-FLATSCR-NEXT:    s_cselect_b32 s4, s6, s4
+; GFX9-FLATSCR-NEXT:    s_cselect_b32 s2, s5, s2
+; GFX9-FLATSCR-NEXT:    s_add_i32 s5, s4, 1
+; GFX9-FLATSCR-NEXT:    s_cmp_ge_u32 s2, s3
+; GFX9-FLATSCR-NEXT:    s_cselect_b32 s2, s5, s4
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-FLATSCR-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT:    s_endpgm
+;
+; GFX11-LABEL: udiv_i32:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, s3
+; GFX11-NEXT:    s_sub_i32 s5, 0, s3
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GFX11-NEXT:    s_waitcnt_depctr 0xfff
+; GFX11-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GFX11-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX11-NEXT:    v_mov_b32_e32 v0, 0
+; GFX11-NEXT:    s_mul_i32 s5, s5, s4
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_mul_hi_u32 s5, s4, s5
+; GFX11-NEXT:    s_add_i32 s4, s4, s5
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_mul_hi_u32 s4, s2, s4
+; GFX11-NEXT:    s_mul_i32 s5, s4, s3
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT:    s_sub_i32 s2, s2, s5
+; GFX11-NEXT:    s_add_i32 s5, s4, 1
+; GFX11-NEXT:    s_sub_i32 s6, s2, s3
+; GFX11-NEXT:    s_cmp_ge_u32 s2, s3
+; GFX11-NEXT:    s_cselect_b32 s4, s5, s4
+; GFX11-NEXT:    s_cselect_b32 s2, s6, s2
+; GFX11-NEXT:    s_add_i32 s5, s4, 1
+; GFX11-NEXT:    s_cmp_ge_u32 s2, s3
+; GFX11-NEXT:    s_cselect_b32 s2, s5, s4
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT:    v_mov_b32_e32 v1, s2
+; GFX11-NEXT:    global_store_b32 v0, v1, s[0:1]
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: udiv_i32:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_cvt_f32_u32 s4, s3
+; GFX12-NEXT:    s_sub_co_i32 s5, 0, s3
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_2) | instskip(NEXT) | instid1(TRANS32_DEP_1)
+; GFX12-NEXT:    v_rcp_iflag_f32_e32 v0, s4
+; GFX12-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX12-NEXT:    s_mul_f32 s4, s4, 0x4f7ffffe
+; GFX12-NEXT:    s_cvt_u32_f32 s4, s4
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT:    s_mul_i32 s5, s5, s4
+; GFX12-NEXT:    s_mul_hi_u32 s5, s4, s5
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT:    s_add_co_i32 s4, s4, s5
+; GFX12-NEXT:    s_mul_hi_u32 s4, s2, s4
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT:    s_mul_i32 s5, s4, s3
+; GFX12-NEXT:    s_sub_co_i32 s2, s2, s5
+; GFX12-NEXT:    s_add_co_i32 s5, s4, 1
+; GFX12-NEXT:    s_sub_co_i32 s6, s2, s3
+; GFX12-NEXT:    s_cmp_ge_u32 s2, s3
+; GFX12-NEXT:    s_cselect_b32 s4, s5, s4
+; GFX12-NEXT:    s_cselect_b32 s2, s6, s2
+; GFX12-NEXT:    s_add_co_i32 s5, s4, 1
+; GFX12-NEXT:    s_cmp_ge_u32 s2, s3
+; GFX12-NEXT:    s_cselect_b32 s2, s5, s4
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX12-NEXT:    global_store_b32 v0, v1, s[0:1]
+; GFX12-NEXT:    s_wait_storecnt 0x0
+; GFX12-NEXT:    s_endpgm
+  %r = udiv i32 %x, %y
+  store i32 %r, ptr addrspace(1) %out
+  ret void
+}
+
+declare float @llvm.amdgcn.s.buffer.load.f32(<4 x i32>, i32, i32)
+
+; from smrd.ll
+; covers s_buffer_load
+;
+define amdgpu_ps float @smrd_sgpr_offset(<4 x i32> inreg %desc, i32 inreg %offset) #0 {
+; GFX9-LABEL: smrd_sgpr_offset:
+; GFX9:       ; %bb.0: ; %main_body
+; GFX9-NEXT:    s_buffer_load_dword s0, s[0:3], s4 offset:0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    ; return to shader part epilog
+;
+; GFX90A-LABEL: smrd_sgpr_offset:
+; GFX90A:       ; %bb.0: ; %main_body
+; GFX90A-NEXT:    s_buffer_load_dword s0, s[0:3], s4 offset:0x0
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    v_mov_b32_e32 v0, s0
+; GFX90A-NEXT:    ; return to shader part epilog
+;
+; GFX10-LABEL: smrd_sgpr_offset:
+; GFX10:       ; %bb.0: ; %main_body
+; GFX10-NEXT:    s_buffer_load_dword s0, s[0:3], s4 offset:0x0
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    v_mov_b32_e32 v0, s0
+; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX9-FLATSCR-LABEL: smrd_sgpr_offset:
+; GFX9-FLATSCR:       ; %bb.0: ; %main_body
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s11, s5
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s10, s4
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s9, s3
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s8, s2
+; GFX9-FLATSCR-NEXT:    s_buffer_load_dword s0, s[8:11], s6 offset:0x0
+; GFX9-FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-FLATSCR-NEXT:    ; return to shader part epilog
+;
+; GFX11-LABEL: smrd_sgpr_offset:
+; GFX11:       ; %bb.0: ; %main_body
+; GFX11-NEXT:    s_buffer_load_b32 s0, s[0:3], s4 offset:0x0
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    v_mov_b32_e32 v0, s0
+; GFX11-NEXT:    ; return to shader part epilog
+;
+; GFX12-LABEL: smrd_sgpr_offset:
+; GFX12:       ; %bb.0: ; %main_body
+; GFX12-NEXT:    s_buffer_load_b32 s0, s[0:3], s4 offset:0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_mov_b32_e32 v0, s0
+; GFX12-NEXT:    ; return to shader part epilog
+main_body:
+  %r = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %offset, i32 0)
+  ret float %r
+}
+
+; from atomic_load_add.ll
+; covers s_load, ds_add (atomic without return)
+;
+define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
+; GFX9-LABEL: atomic_add_local:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b64 s[2:3], exec
+; GFX9-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT:    v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT:    s_cbranch_execz .LBB5_2
+; GFX9-NEXT:  ; %bb.1:
+; GFX9-NEXT:    s_load_dword s0, s[0:1], 0x24
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_bcnt1_i32_b64 s1, s[2:3]
+; GFX9-NEXT:    s_mul_i32 s1, s1, 5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    ds_add_u32 v0, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:  .LBB5_2:
+; GFX9-NEXT:    s_endpgm
+;
+; GFX90A-LABEL: atomic_add_local:
+; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_mov_b64 s[2:3], exec
+; GFX90A-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT:    v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT:    s_cbranch_execz .LBB5_2
+; GFX90A-NEXT:  ; %bb.1:
+; GFX90A-NEXT:    s_load_dword s0, s[0:1], 0x24
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT:    s_mul_i32 s1, s1, 5
+; GFX90A-NEXT:    v_mov_b32_e32 v1, s1
+; GFX90A-NEXT:    v_mov_b32_e32 v0, s0
+; GFX90A-NEXT:    ds_add_u32 v0, v1
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:  .LBB5_2:
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX10-LABEL: atomic_add_local:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_mov_b32 s2, exec_lo
+; GFX10-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT:    s_and_saveexec_b32 s3, vcc_lo
+; GFX10-NEXT:    s_cbranch_execz .LBB5_2
+; GFX10-NEXT:  ; %bb.1:
+; GFX10-NEXT:    s_load_dword s0, s[0:1], 0x24
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_bcnt1_i32_b32 s1, s2
+; GFX10-NEXT:    s_mul_i32 s1, s1, 5
+; GFX10-NEXT:    v_mov_b32_e32 v1, s1
+; GFX10-NEXT:    v_mov_b32_e32 v0, s0
+; GFX10-NEXT:    ds_add_u32 v0, v1
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    buffer_gl0_inv
+; GFX10-NEXT:  .LBB5_2:
+; GFX10-NEXT:    s_endpgm
+;
+; GFX9-FLATSCR-LABEL: atomic_add_local:
+; GFX9-FLATSCR:       ; %bb.0:
+; GFX9-FLATSCR-NEXT:    s_mov_b64 s[2:3], exec
+; GFX9-FLATSCR-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-FLATSCR-NEXT:    v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-FLATSCR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-FLATSCR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX9-FLATSCR-NEXT:    s_cbranch_execz .LBB5_2
+; GFX9-FLATSCR-NEXT:  ; %bb.1:
+; GFX9-FLATSCR-NEXT:    s_load_dword s0, s[0:1], 0x24
+; GFX9-FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    s_bcnt1_i32_b64 s1, s[2:3]
+; GFX9-FLATSCR-NEXT:    s_mul_i32 s1, s1, 5
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-FLATSCR-NEXT:    ds_add_u32 v0, v1
+; GFX9-FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:  .LBB5_2:
+; GFX9-FLATSCR-NEXT:    s_endpgm
+;
+; GFX11-LABEL: atomic_add_local:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_mov_b32 s2, exec_lo
+; GFX11-NEXT:    s_mov_b32 s3, exec_lo
+; GFX11-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX11-NEXT:    s_cbranch_execz .LBB5_2
+; GFX11-NEXT:  ; %bb.1:
+; GFX11-NEXT:    s_load_b32 s0, s[0:1], 0x24
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    s_bcnt1_i32_b32 s1, s2
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_mul_i32 s1, s1, 5
+; GFX11-NEXT:    v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT:    ds_add_u32 v0, v1
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    buffer_gl0_inv
+; GFX11-NEXT:  .LBB5_2:
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: atomic_add_local:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_mov_b32 s2, exec_lo
+; GFX12-NEXT:    s_mov_b32 s3, exec_lo
+; GFX12-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX12-NEXT:    s_cbranch_execz .LBB5_2
+; GFX12-NEXT:  ; %bb.1:
+; GFX12-NEXT:    s_load_b32 s0, s[0:1], 0x24
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_bcnt1_i32_b32 s1, s2
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT:    s_mul_i32 s1, s1, 5
+; GFX12-NEXT:    v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
+; GFX12-NEXT:    ds_add_u32 v0, v1
+; GFX12-NEXT:    s_wait_dscnt 0x0
+; GFX12-NEXT:    global_inv scope:SCOPE_SE
+; GFX12-NEXT:  .LBB5_2:
+; GFX12-NEXT:    s_endpgm
+   %unused = atomicrmw volatile add ptr addrspace(3) %local, i32 5 seq_cst
+   ret void
+}
+
+; from flat_atomics_i32_system.ll
+; covers flat_atomic_swap (atomic without return)
+;
+define void @flat_atomic_xchg_i32_noret(ptr %ptr, i32 %in) {
+; GFX9-LABEL: flat_atomic_xchg_i32_noret:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    flat_atomic_swap v[0:1], v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    buffer_wbinvl1_vol
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: flat_atomic_xchg_i32_noret:
+; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT:    buffer_wbl2
+; GFX90A-NEXT:    flat_atomic_swap v[0:1], v2
+; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT:    buffer_invl2
+; GFX90A-NEXT:    buffer_wbinvl1_vol
+; GFX90A-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: flat_atomic_xchg_i32_noret:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    flat_atomic_swap v[0:1], v2
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    buffer_gl1_inv
+; GFX10-NEXT:    buffer_gl0_inv
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-FLATSCR-LABEL: flat_atomic_xchg_i32_noret:
+; GFX9-FLATSCR:       ; %bb.0:
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    flat_atomic_swap v[0:1], v2
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    buffer_wbinvl1_vol
+; GFX9-FLATSCR-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: flat_atomic_xchg_i32_noret:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    flat_atomic_swap_b32 v[0:1], v2
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    buffer_gl1_inv
+; GFX11-NEXT:    buffer_gl0_inv
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: flat_atomic_xchg_i32_noret:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_wait_storecnt 0x0
+; GFX12-NEXT:    flat_atomic_swap_b32 v[0:1], v2
+; GFX12-NEXT:    s_wait_storecnt_dscnt 0x0
+; GFX12-NEXT:    global_inv scope:SCOPE_SYS
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %tmp0 = atomicrmw xchg ptr %ptr, i32 %in seq_cst
+  ret void
+}
+
+; from atomic_load_add.ll
+; covers s_load, ds_add_rtn (atomic with return)
+;
+define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrspace(3) %local) {
+; GFX9-LABEL: atomic_add_ret_local:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9-NEXT:    v_mbcnt_lo_u32_b32 v0, s4, 0
+; GFX9-NEXT:    v_mbcnt_hi_u32_b32 v0, s5, v0
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT:    ; implicit-def: $vgpr1
+; GFX9-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT:    s_cbranch_execz .LBB7_2
+; GFX9-NEXT:  ; %bb.1:
+; GFX9-NEXT:    s_load_dword s6, s[0:1], 0x2c
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9-NEXT:    s_mul_i32 s4, s4, 5
+; GFX9-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-NEXT:    ds_add_rtn_u32 v1, v1, v2
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:  .LBB7_2:
+; GFX9-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    v_mad_u32_u24 v0, v0, 5, s2
+; GFX9-NEXT:    global_store_dword v2, v0, s[0:1]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_endpgm
+;
+; GFX90A-LABEL: atomic_add_ret_local:
+; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_mov_b64 s[4:5], exec
+; GFX90A-NEXT:    v_mbcnt_lo_u32_b32 v0, s4, 0
+; GFX90A-NEXT:    v_mbcnt_hi_u32_b32 v0, s5, v0
+; GFX90A-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT:    ; implicit-def: $vgpr1
+; GFX90A-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX90A-NEXT:    s_cbranch_execz .LBB7_2
+; GFX90A-NEXT:  ; %bb.1:
+; GFX90A-NEXT:    s_load_dword s6, s[0:1], 0x2c
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX90A-NEXT:    s_mul_i32 s4, s4, 5
+; GFX90A-NEXT:    v_mov_b32_e32 v2, s4
+; GFX90A-NEXT:    v_mov_b32_e32 v1, s6
+; GFX90A-NEXT:    ds_add_rtn_u32 v1, v1, v2
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:  .LBB7_2:
+; GFX90A-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX90A-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX90A-NEXT:    v_mov_b32_e32 v2, 0
+; GFX90A-NEXT:    v_mad_u32_u24 v0, v0, 5, s2
+; GFX90A-NEXT:    global_store_dword v2, v0, s[0:1]
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX10-LABEL: atomic_add_ret_local:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_mov_b32 s3, exec_lo
+; GFX10-NEXT:    ; implicit-def: $vgpr1
+; GFX10-NEXT:    v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT:    s_and_saveexec_b32 s2, vcc_lo
+; GFX10-NEXT:    s_cbranch_execz .LBB7_2
+; GFX10-NEXT:  ; %bb.1:
+; GFX10-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX10-NEXT:    s_mul_i32 s3, s3, 5
+; GFX10-NEXT:    v_mov_b32_e32 v2, s3
+; GFX10-NEXT:    v_mov_b32_e32 v1, s4
+; GFX10-NEXT:    ds_add_rtn_u32 v1, v1, v2
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    buffer_gl0_inv
+; GFX10-NEXT:  .LBB7_2:
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX10-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX10-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10-NEXT:    v_mad_u32_u24 v0, v0, 5, s2
+; GFX10-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    s_endpgm
+;
+; GFX9-FLATSCR-LABEL: atomic_add_ret_local:
+; GFX9-FLATSCR:       ; %bb.0:
+; GFX9-FLATSCR-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9-FLATSCR-NEXT:    v_mbcnt_lo_u32_b32 v0, s4, 0
+; GFX9-FLATSCR-NEXT:    v_mbcnt_hi_u32_b32 v0, s5, v0
+; GFX9-FLATSCR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-FLATSCR-NEXT:    ; implicit-def: $vgpr1
+; GFX9-FLATSCR-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX9-FLATSCR-NEXT:    s_cbranch_execz .LBB7_2
+; GFX9-FLATSCR-NEXT:  ; %bb.1:
+; GFX9-FLATSCR-NEXT:    s_load_dword s6, s[0:1], 0x2c
+; GFX9-FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9-FLATSCR-NEXT:    s_mul_i32 s4, s4, 5
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v1, s6
+; GFX9-FLATSCR-NEXT:    ds_add_rtn_u32 v1, v1, v2
+; GFX9-FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:  .LBB7_2:
+; GFX9-FLATSCR-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX9-FLATSCR-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-FLATSCR-NEXT:    v_mad_u32_u24 v0, v0, 5, s2
+; GFX9-FLATSCR-NEXT:    global_store_dword v2, v0, s[0:1]
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT:    s_endpgm
+;
+; GFX11-LABEL: atomic_add_ret_local:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_mov_b32 s3, exec_lo
+; GFX11-NEXT:    s_mov_b32 s2, exec_lo
+; GFX11-NEXT:    v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX11-NEXT:    ; implicit-def: $vgpr1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX11-NEXT:    s_cbranch_execz .LBB7_2
+; GFX11-NEXT:  ; %bb.1:
+; GFX11-NEXT:    s_load_b32 s4, s[0:1], 0x2c
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_mul_i32 s3, s3, 5
+; GFX11-NEXT:    v_dual_mov_b32 v2, s3 :: v_dual_mov_b32 v1, s4
+; GFX11-NEXT:    ds_add_rtn_u32 v1, v1, v2
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    buffer_gl0_inv
+; GFX11-NEXT:  .LBB7_2:
+; GFX11-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX11-NEXT:    v_mov_b32_e32 v1, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT:    v_mad_u32_u24 v0, v0, 5, s2
+; GFX11-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: atomic_add_ret_local:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_mov_b32 s3, exec_lo
+; GFX12-NEXT:    s_mov_b32 s2, exec_lo
+; GFX12-NEXT:    v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX12-NEXT:    ; implicit-def: $vgpr1
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX12-NEXT:    s_cbranch_execz .LBB7_2
+; GFX12-NEXT:  ; %bb.1:
+; GFX12-NEXT:    s_load_b32 s4, s[0:1], 0x2c
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT:    s_mul_i32 s3, s3, 5
+; GFX12-NEXT:    v_dual_mov_b32 v2, s3 :: v_dual_mov_b32 v1, s4
+; GFX12-NEXT:    ds_add_rtn_u32 v1, v1, v2
+; GFX12-NEXT:    s_wait_dscnt 0x0
+; GFX12-NEXT:    global_inv scope:SCOPE_SE
+; GFX12-NEXT:  .LBB7_2:
+; GFX12-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX12-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX12-NEXT:    v_mov_b32_e32 v1, 0
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT:    v_mad_u32_u24 v0, v0, 5, s2
+; GFX12-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX12-NEXT:    s_wait_storecnt 0x0
+; GFX12-NEXT:    s_endpgm
+  %val = atomicrmw volatile add ptr addrspace(3) %local, i32 5 seq_cst
+  store i32 %val, ptr addrspace(1) %out
+  ret void
+}
+
+declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.add(i32, ptr addrspace(8), i32, i32, i32 immarg)
+
+; from atomic_optimizations_buffer.ll
+; covers buffer_atomic (atomic with return)
+;
+define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out, ptr addrspace(8) %inout) {
+; GFX9-LABEL: add_i32_constant:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9-NEXT:    v_mbcnt_lo_u32_b32 v0, s4, 0
+; GFX9-NEXT:    v_mbcnt_hi_u32_b32 v0, s5, v0
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT:    ; implicit-def: $vgpr1
+; GFX9-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT:    s_cbranch_execz .LBB8_2
+; GFX9-NEXT:  ; %bb.1:
+; GFX9-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x34
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9-NEXT:    s_mul_i32 s4, s4, 5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    buffer_atomic_add v1, off, s[8:11], 0 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:  .LBB8_2:
+; GFX9-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    v_mad_u32_u24 v0, v0, 5, s2
+; GFX9-NEXT:    global_store_dword v2, v0, s[0:1]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_endpgm
+;
+; GFX90A-LABEL: add_i32_constant:
+; GFX90A:       ; %bb.0: ; %entry
+; GFX90A-NEXT:    s_mov_b64 s[4:5], exec
+; GFX90A-NEXT:    v_mbcnt_lo_u32_b32 v0, s4, 0
+; GFX90A-NEXT:    v_mbcnt_hi_u32_b32 v0, s5, v0
+; GFX90A-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT:    ; implicit-def: $vgpr1
+; GFX90A-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX90A-NEXT:    s_cbranch_execz .LBB8_2
+; GFX90A-NEXT:  ; %bb.1:
+; GFX90A-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x34
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX90A-NEXT:    s_mul_i32 s4, s4, 5
+; GFX90A-NEXT:    v_mov_b32_e32 v1, s4
+; GFX90A-NEXT:    buffer_atomic_add v1, off, s[8:11], 0 glc
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:  .LBB8_2:
+; GFX90A-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX90A-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX90A-NEXT:    v_mov_b32_e32 v2, 0
+; GFX90A-NEXT:    v_mad_u32_u24 v0, v0, 5, s2
+; GFX90A-NEXT:    global_store_dword v2, v0, s[0:1]
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX10-LABEL: add_i32_constant:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_mov_b32 s3, exec_lo
+; GFX10-NEXT:    ; implicit-def: $vgpr1
+; GFX10-NEXT:    v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT:    s_and_saveexec_b32 s2, vcc_lo
+; GFX10-NEXT:    s_cbranch_execz .LBB8_2
+; GFX10-NEXT:  ; %bb.1:
+; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX10-NEXT:    s_mul_i32 s3, s3, 5
+; GFX10-NEXT:    v_mov_b32_e32 v1, s3
+; GFX10-NEXT:    buffer_atomic_add v1, off, s[4:7], 0 glc
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:  .LBB8_2:
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX10-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX10-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10-NEXT:    v_mad_u32_u24 v0, v0, 5, s2
+; GFX10-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    s_endpgm
+;
+; GFX9-FLATSCR-LABEL: add_i32_constant:
+; GFX9-FLATSCR:       ; %bb.0: ; %entry
+; GFX9-FLATSCR-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9-FLATSCR-NEXT:    v_mbcnt_lo_u32_b32 v0, s4, 0
+; GFX9-FLATSCR-NEXT:    v_mbcnt_hi_u32_b32 v0, s5, v0
+; GFX9-FLATSCR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-FLATSCR-NEXT:    ; implicit-def: $vgpr1
+; GFX9-FLATSCR-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX9-FLATSCR-NEXT:    s_cbranch_execz .LBB8_2
+; GFX9-FLATSCR-NEXT:  ; %bb.1:
+; GFX9-FLATSCR-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x34
+; GFX9-FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9-FLATSCR-NEXT:    s_mul_i32 s4, s4, 5
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-FLATSCR-NEXT:    buffer_atomic_add v1, off, s[8:11], 0 glc
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT:  .LBB8_2:
+; GFX9-FLATSCR-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX9-FLATSCR-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-FLATSCR-NEXT:    v_mad_u32_u24 v0, v0, 5, s2
+; GFX9-FLATSCR-NEXT:    global_store_dword v2, v0, s[0:1]
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT:    s_endpgm
+;
+; GFX11-LABEL: add_i32_constant:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_mov_b32 s3, exec_lo
+; GFX11-NEXT:    s_mov_b32 s2, exec_lo
+; GFX11-NEXT:    v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX11-NEXT:    ; implicit-def: $vgpr1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX11-NEXT:    s_cbranch_execz .LBB8_2
+; GFX11-NEXT:  ; %bb.1:
+; GFX11-NEXT:    s_load_b128 s[4:7], s[0:1], 0x34
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_mul_i32 s3, s3, 5
+; GFX11-NEXT:    v_mov_b32_e32 v1, s3
+; GFX11-NEXT:    buffer_atomic_add_u32 v1, off, s[4:7], 0 glc
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:  .LBB8_2:
+; GFX11-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX11-NEXT:    v_mov_b32_e32 v1, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT:    v_mad_u32_u24 v0, v0, 5, s2
+; GFX11-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: add_i32_constant:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_mov_b32 s3, exec_lo
+; GFX12-NEXT:    s_mov_b32 s2, exec_lo
+; GFX12-NEXT:    v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX12-NEXT:    ; implicit-def: $vgpr1
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX12-NEXT:    s_cbranch_execz .LBB8_2
+; GFX12-NEXT:  ; %bb.1:
+; GFX12-NEXT:    s_load_b128 s[4:7], s[0:1], 0x34
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT:    s_mul_i32 s3, s3, 5
+; GFX12-NEXT:    v_mov_b32_e32 v1, s3
+; GFX12-NEXT:    buffer_atomic_add_u32 v1, off, s[4:7], null th:TH_ATOMIC_RETURN
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:  .LBB8_2:
+; GFX12-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX12-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX12-NEXT:    v_mov_b32_e32 v1, 0
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT:    v_mad_u32_u24 v0, v0, 5, s2
+; GFX12-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX12-NEXT:    s_wait_storecnt 0x0
+; GFX12-NEXT:    s_endpgm
+entry:
+  %old = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.add(i32 5, ptr addrspace(8) %inout, i32 0, i32 0, i32 0)
+  store i32 %old, ptr addrspace(1) %out
+  ret void
+}
+
+declare <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i16(i32, i16, <8 x i32>, i32, i32)
+
+; from llvm.amdgcn.image.load.a16.ll
+; covers image_load
+;
+define amdgpu_ps <4 x float> @load.f32.1d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
+; GFX9-LABEL: load.f32.1d:
+; GFX9:       ; %bb.0: ; %main_body
+; GFX9-NEXT:    image_load v0, v0, s[0:7] dmask:0x1 unorm a16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    ; return to shader part epilog
+;
+; GFX90A-LABEL: load.f32.1d:
+; GFX90A:       ; %bb.0: ; %main_body
+; GFX90A-NEXT:    image_load v0, v0, s[0:7] dmask:0x1 unorm a16
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    ; return to shader part epilog
+;
+; GFX10-LABEL: load.f32.1d:
+; GFX10:       ; %bb.0: ; %main_body
+; GFX10-NEXT:    image_load v0, v0, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm a16
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX9-FLATSCR-LABEL: load.f32.1d:
+; GFX9-FLATSCR:       ; %bb.0: ; %main_body
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s11, s9
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s10, s8
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s9, s7
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s8, s6
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s7, s5
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s6, s4
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s5, s3
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s4, s2
+; GFX9-FLATSCR-NEXT:    image_load v0, v0, s[4:11] dmask:0x1 unorm a16
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT:    ; return to shader part epilog
+;
+; GFX11-LABEL: load.f32.1d:
+; GFX11:       ; %bb.0: ; %main_body
+; GFX11-NEXT:    image_load v0, v0, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm a16
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    ; return to shader part epilog
+;
+; GFX12-LABEL: load.f32.1d:
+; GFX12:       ; %bb.0: ; %main_body
+; GFX12-NEXT:    image_load v0, v0, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D a16
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    ; return to shader part epilog
+main_body:
+  %x = extractelement <2 x i16> %coords, i32 0
+  %v = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i16(i32 1, i16 %x, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+declare void @llvm.amdgcn.image.store.1d.v4f32.i16(<4 x float>, i32, i16, <8 x i32>, i32, i32)
+
+; from llvm.amdgcn.image.store.a16.ll
+; covers image_store
+;
+define amdgpu_ps void @store_f32_1d(<8 x i32> inreg %rsrc, <2 x i16> %coords, <4 x float> %val) {
+; GFX9-LABEL: store_f32_1d:
+; GFX9:       ; %bb.0: ; %main_body
+; GFX9-NEXT:    image_store v[1:4], v0, s[0:7] dmask:0x1 unorm a16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_endpgm
+;
+; GFX90A-LABEL: store_f32_1d:
+; GFX90A:       ; %bb.0: ; %main_body
+; GFX90A-NEXT:    v_mov_b32_e32 v5, v4
+; GFX90A-NEXT:    v_mov_b32_e32 v4, v3
+; GFX90A-NEXT:    v_mov_b32_e32 v3, v2
+; GFX90A-NEXT:    v_mov_b32_e32 v2, v1
+; GFX90A-NEXT:    image_store v[2:5], v0, s[0:7] dmask:0x1 unorm a16
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX10-LABEL: store_f32_1d:
+; GFX10:       ; %bb.0: ; %main_body
+; GFX10-NEXT:    image_store v[1:4], v0, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm a16
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    s_endpgm
+;
+; GFX9-FLATSCR-LABEL: store_f32_1d:
+; GFX9-FLATSCR:       ; %bb.0: ; %main_body
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s11, s9
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s10, s8
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s9, s7
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s8, s6
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s7, s5
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s6, s4
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s5, s3
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s4, s2
+; GFX9-FLATSCR-NEXT:    image_store v[1:4], v0, s[4:11] dmask:0x1 unorm a16
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT:    s_endpgm
+;
+; GFX11-LABEL: store_f32_1d:
+; GFX11:       ; %bb.0: ; %main_body
+; GFX11-NEXT:    image_store v[1:4], v0, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm a16
+; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: store_f32_1d:
+; GFX12:       ; %bb.0: ; %main_body
+; GFX12-NEXT:    image_store v[1:4], v0, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D a16
+; GFX12-NEXT:    s_wait_storecnt 0x0
+; GFX12-NEXT:    s_endpgm
+
+main_body:
+  %x = extractelement <2 x i16> %coords, i32 0
+  call void @llvm.amdgcn.image.store.1d.v4f32.i16(<4 x float> %val, i32 1, i16 %x, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+declare i32 @llvm.amdgcn.image.atomic.swap.1d.i32.i32(i32, i32, <8 x i32>, i32, i32)
+
+; from llvm.amdgcn.image.atomic.dim.ll
+; covers image_atomic (atomic with return)
+;
+define amdgpu_ps float @atomic_swap_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
+; GFX9-LABEL: atomic_swap_1d:
+; GFX9:       ; %bb.0: ; %main_body
+; GFX9-NEXT:    image_atomic_swap v0, v1, s[0:7] dmask:0x1 unorm glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    ; return to shader part epilog
+;
+; GFX90A-LABEL: atomic_swap_1d:
+; GFX90A:       ; %bb.0: ; %main_body
+; GFX90A-NEXT:    v_mov_b32_e32 v2, v1
+; GFX90A-NEXT:    image_atomic_swap v0, v2, s[0:7] dmask:0x1 unorm glc
+; GFX90A-NEXT:    s_waitcnt vmcnt(0)
+; GFX90A-NEXT:    ; return to shader part epilog
+;
+; GFX10-LABEL: atomic_swap_1d:
+; GFX10:       ; %bb.0: ; %main_body
+; GFX10-NEXT:    image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm glc
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX9-FLATSCR-LABEL: atomic_swap_1d:
+; GFX9-FLATSCR:       ; %bb.0: ; %main_body
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s11, s9
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s10, s8
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s9, s7
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s8, s6
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s7, s5
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s6, s4
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s5, s3
+; GFX9-FLATSCR-NEXT:    s_mov_b32 s4, s2
+; GFX9-FLATSCR-NEXT:    image_atomic_swap v0, v1, s[4:11] dmask:0x1 unorm glc
+; GFX9-FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT:    ; return to shader part epilog
+;
+; GFX11-LABEL: atomic_swap_1d:
+; GFX11:       ; %bb.0: ; %main_body
+; GFX11-NEXT:    image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm glc
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    ; return to shader part epilog
+;
+; GFX12-LABEL: atomic_swap_1d:
+; GFX12:       ; %bb.0: ; %main_body
+; GFX12-NEXT:    image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    ; return to shader part epilog
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.swap.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+; from lds-bounds.ll
+; covers ds_write_b64 (atomic without return)
+@compute_lds = external addrspace(3) global [512 x i32], align 16
+;
+define amdgpu_cs void @store_aligned(ptr addrspace(3) %ptr) #0 {
+; GFX9-LABEL: store_aligned:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    v_mov_b32_e32 v1, 42
+; GFX9-NEXT:    v_mov_b32_e32 v2, 43
+; GFX9-NEXT:    ds_write_b64 v0, v[1:2]
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_endpgm
+;
+; GFX90A-LABEL: store_aligned:
+; GFX90A:       ; %bb.0: ; %entry
+; GFX90A-NEXT:    v_mov_b32_e32 v2, 42
+; GFX90A-NEXT:    v_mov_b32_e32 v3, 43
+; GFX90A-NEXT:    ds_write_b64 v0, v[2:3]
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX10-LABEL: store_aligned:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    v_mov_b32_e32 v1, 42
+; GFX10-NEXT:    v_mov_b32_e32 v2, 43
+; GFX10-NEXT:    ds_write_b64 v0, v[1:2]
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_endpgm
+;
+; GFX9-FLATSCR-LABEL: store_aligned:
+; GFX9-FLATSCR:       ; %bb.0: ; %entry
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v1, 42
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v2, 43
+; GFX9-FLATSCR-NEXT:    ds_write_b64 v0, v[1:2]
+; GFX9-FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    s_endpgm
+;
+; GFX11-LABEL: store_aligned:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    v_dual_mov_b32 v1, 42 :: v_dual_mov_b32 v2, 43
+; GFX11-NEXT:    ds_store_b64 v0, v[1:2]
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: store_aligned:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    v_dual_mov_b32 v1, 42 :: v_dual_mov_b32 v2, 43
+; GFX12-NEXT:    ds_store_b64 v0, v[1:2]
+; GFX12-NEXT:    s_wait_dscnt 0x0
+; GFX12-NEXT:    s_endpgm
+entry:
+  %ptr.gep.1 = getelementptr i32, ptr addrspace(3) %ptr, i32 1
+
+  store i32 42, ptr addrspace(3) %ptr, align 8
+  store i32 43, ptr addrspace(3) %ptr.gep.1
+  ret void
+}
+
+
+; from lds-bounds.ll
+; covers ds_read_b64
+;
+define amdgpu_cs <2 x float> @load_aligned(ptr addrspace(3) %ptr) #0 {
+; GFX9-LABEL: load_aligned:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    ds_read_b64 v[0:1], v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ; return to shader part epilog
+;
+; GFX90A-LABEL: load_aligned:
+; GFX90A:       ; %bb.0: ; %entry
+; GFX90A-NEXT:    ds_read_b64 v[0:1], v0
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    ; return to shader part epilog
+;
+; GFX10-LABEL: load_aligned:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    ds_read_b64 v[0:1], v0
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX9-FLATSCR-LABEL: load_aligned:
+; GFX9-FLATSCR:       ; %bb.0: ; %entry
+; GFX9-FLATSCR-NEXT:    ds_read_b64 v[0:1], v0
+; GFX9-FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    ; return to shader part epilog
+;
+; GFX11-LABEL: load_aligned:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    ds_load_b64 v[0:1], v0
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    ; return to shader part epilog
+;
+; GFX12-LABEL: load_aligned:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    ds_load_b64 v[0:1], v0
+; GFX12-NEXT:    s_wait_dscnt 0x0
+; GFX12-NEXT:    ; return to shader part epilog
+entry:
+  %ptr.gep.1 = getelementptr i32, ptr addrspace(3) %ptr, i32 1
+
+  %v.0 = load i32, ptr addrspace(3) %ptr, align 8
+  %v.1 = load i32, ptr addrspace(3) %ptr.gep.1
+
+  %r.0 = insertelement <2 x i32> poison, i32 %v.0, i32 0
+  %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+  %bc = bitcast <2 x i32> %r.1 to <2 x float>
+  ret <2 x float> %bc
+}
+
+; from lds-bounds.ll
+; covers ds_write2_b32
+;
+define amdgpu_cs void @store_global_const_idx() #0 {
+; GFX9-LABEL: store_global_const_idx:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    v_mov_b32_e32 v0, compute_lds@abs32@lo
+; GFX9-NEXT:    v_mov_b32_e32 v1, 42
+; GFX9-NEXT:    v_mov_b32_e32 v2, 43
+; GFX9-NEXT:    ds_write2_b32 v0, v1, v2 offset0:3 offset1:4
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_endpgm
+;
+; GFX90A-LABEL: store_global_const_idx:
+; GFX90A:       ; %bb.0: ; %entry
+; GFX90A-NEXT:    v_mov_b32_e32 v0, compute_lds@abs32@lo
+; GFX90A-NEXT:    v_mov_b32_e32 v1, 42
+; GFX90A-NEXT:    v_mov_b32_e32 v2, 43
+; GFX90A-NEXT:    ds_write2_b32 v0, v1, v2 offset0:3 offset1:4
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX10-LABEL: store_global_const_idx:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    v_mov_b32_e32 v0, compute_lds@abs32@lo
+; GFX10-NEXT:    v_mov_b32_e32 v1, 42
+; GFX10-NEXT:    v_mov_b32_e32 v2, 43
+; GFX10-NEXT:    ds_write2_b32 v0, v1, v2 offset0:3 offset1:4
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_endpgm
+;
+; GFX9-FLATSCR-LABEL: store_global_const_idx:
+; GFX9-FLATSCR:       ; %bb.0: ; %entry
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v0, compute_lds@abs32@lo
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v1, 42
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v2, 43
+; GFX9-FLATSCR-NEXT:    ds_write2_b32 v0, v1, v2 offset0:3 offset1:4
+; GFX9-FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    s_endpgm
+;
+; GFX11-LABEL: store_global_const_idx:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    v_dual_mov_b32 v0, compute_lds@abs32@lo :: v_dual_mov_b32 v1, 42
+; GFX11-NEXT:    v_mov_b32_e32 v2, 43
+; GFX11-NEXT:    ds_store_2addr_b32 v0, v1, v2 offset0:3 offset1:4
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: store_global_const_idx:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    v_dual_mov_b32 v0, compute_lds@abs32@lo :: v_dual_mov_b32 v1, 42
+; GFX12-NEXT:    v_mov_b32_e32 v2, 43
+; GFX12-NEXT:    ds_store_2addr_b32 v0, v1, v2 offset0:3 offset1:4
+; GFX12-NEXT:    s_wait_dscnt 0x0
+; GFX12-NEXT:    s_endpgm
+entry:
+  %ptr.a = getelementptr [512 x i32], ptr addrspace(3) @compute_lds, i32 0, i32 3
+  %ptr.b = getelementptr [512 x i32], ptr addrspace(3) @compute_lds, i32 0, i32 4
+
+  store i32 42, ptr addrspace(3) %ptr.a
+  store i32 43, ptr addrspace(3) %ptr.b
+  ret void
+}
+
+; from lds-bounds.ll
+; covers ds_read2_b32
+;
+define amdgpu_cs <2 x float> @load_global_const_idx() #0 {
+; GFX9-LABEL: load_global_const_idx:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    v_mov_b32_e32 v0, compute_lds@abs32@lo
+; GFX9-NEXT:    ds_read2_b32 v[0:1], v0 offset0:3 offset1:4
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ; return to shader part epilog
+;
+; GFX90A-LABEL: load_global_const_idx:
+; GFX90A:       ; %bb.0: ; %entry
+; GFX90A-NEXT:    v_mov_b32_e32 v0, compute_lds@abs32@lo
+; GFX90A-NEXT:    ds_read2_b32 v[0:1], v0 offset0:3 offset1:4
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    ; return to shader part epilog
+;
+; GFX10-LABEL: load_global_const_idx:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    v_mov_b32_e32 v0, compute_lds@abs32@lo
+; GFX10-NEXT:    ds_read2_b32 v[0:1], v0 offset0:3 offset1:4
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX9-FLATSCR-LABEL: load_global_const_idx:
+; GFX9-FLATSCR:       ; %bb.0: ; %entry
+; GFX9-FLATSCR-NEXT:    v_mov_b32_e32 v0, compute_lds@abs32@lo
+; GFX9-FLATSCR-NEXT:    ds_read2_b32 v[0:1], v0 offset0:3 offset1:4
+; GFX9-FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-FLATSCR-NEXT:    ; return to shader part epilog
+;
+; GFX11-LABEL: load_global_const_idx:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    v_mov_b32_e32 v0, compute_lds@abs32@lo
+; GFX11-NEXT:    ds_load_2addr_b32 v[0:1], v0 offset0:3 offset1:4
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    ; return to shader part epilog
+;
+; GFX12-LABEL: load_global_const_idx:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    v_mov_b32_e32 v0, compute_lds@abs32@lo
+; GFX12-NEXT:    ds_load_2addr_b32 v[0:1], v0 offset0:3 offset1:4
+; GFX12-NEXT:    s_wait_dscnt 0x0
+; GFX12-NEXT:    ; return to shader part epilog
+entry:
+  %ptr.a = getelementptr [512 x i32], ptr addrspace(3) @compute_lds, i32 0, i32 3
+  %ptr.b = getelementptr [512 x i32], ptr addrspace(3) @compute_lds, i32 0, i32 4
+
+  %v.0 = load i32, ptr addrspace(3) %ptr.a
+  %v.1 = load i32, ptr addrspace(3) %ptr.b
+
+  %r.0 = insertelement <2 x i32> poison, i32 %v.0, i32 0
+  %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+  %bc = bitcast <2 x i32> %r.1 to <2 x float>
+  ret <2 x float> %bc
+}
+