[llvm] 740a3ad - AMDGPU: Add codegen for atomicrmw operations usub_cond and usub_sat (#141068)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 5 04:37:38 PST 2025
Author: anjenner
Date: 2025-12-05T12:37:33Z
New Revision: 740a3ad1f744034275297d8ae315abbd904eac8c
URL: https://github.com/llvm/llvm-project/commit/740a3ad1f744034275297d8ae315abbd904eac8c
DIFF: https://github.com/llvm/llvm-project/commit/740a3ad1f744034275297d8ae315abbd904eac8c.diff
LOG: AMDGPU: Add codegen for atomicrmw operations usub_cond and usub_sat (#141068)
Split off from https://github.com/llvm/llvm-project/pull/105553 as per
discussion there.
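For reference, a minimal sketch of the IR forms this patch teaches the AMDGPU backend to select natively (the function names and the !0 metadata node are illustrative; the atomicrmw lines mirror the new tests added below):

define i32 @example_usub_sat(ptr addrspace(1) %ptr, i32 %data) {
  ; Saturating unsigned subtraction: the stored value clamps at zero instead of wrapping.
  %old = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %old
}

define i32 @example_usub_cond(ptr addrspace(1) %ptr, i32 %data) {
  ; Conditional unsigned subtraction: the subtraction is performed only when it would not underflow;
  ; otherwise the memory value is left unchanged.
  %old = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %data seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %old
}

!0 = !{}

As the test updates in the diff show, on GFX12 usub_cond lowers to the cond_sub instructions (e.g. global_atomic_cond_sub_u32) and usub_sat to the sub_clamp/csub instructions, while targets without the hardware support fall back to a cmpxchg expansion loop.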
Added:
llvm/test/CodeGen/AMDGPU/atomicrmw_usub_cond.ll
llvm/test/CodeGen/AMDGPU/atomicrmw_usub_sat.ll
llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-usub_cond.ll
llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-usub_sat.ll
Modified:
llvm/include/llvm/IR/IntrinsicsAMDGPU.td
llvm/lib/Target/AMDGPU/AMDGPU.td
llvm/lib/Target/AMDGPU/AMDGPUGISel.td
llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
llvm/lib/Target/AMDGPU/DSInstructions.td
llvm/lib/Target/AMDGPU/FLATInstructions.td
llvm/lib/Target/AMDGPU/GCNSubtarget.h
llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
llvm/lib/Target/AMDGPU/SIISelLowering.cpp
llvm/lib/Target/AMDGPU/SIInstrInfo.td
llvm/lib/Target/AMDGPU/SIInstructions.td
llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/atomics-gmir.mir
llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.global.atomic.csub.ll
llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll
llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx1030.ll
llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx1030.ll
llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.csub.ll
llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll
llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-agent.ll
llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-system.ll
llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-agent.ll
llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-system.ll
llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index c2057ac3a14e6..91d72d5ef9dfc 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1703,6 +1703,7 @@ def int_amdgcn_raw_buffer_atomic_xor : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_inc : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_dec : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_cond_sub_u32 : AMDGPURawBufferAtomic;
+def int_amdgcn_raw_buffer_atomic_sub_clamp_u32 : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_cmpswap : Intrinsic<
[llvm_anyint_ty],
[LLVMMatchType<0>, // src(VGPR)
@@ -1740,6 +1741,7 @@ def int_amdgcn_raw_ptr_buffer_atomic_xor : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_inc : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_dec : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_cond_sub_u32 : AMDGPURawPtrBufferAtomic;
+def int_amdgcn_raw_ptr_buffer_atomic_sub_clamp_u32 : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_cmpswap : Intrinsic<
[llvm_anyint_ty],
[LLVMMatchType<0>, // src(VGPR)
@@ -1781,6 +1783,7 @@ def int_amdgcn_struct_buffer_atomic_xor : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_inc : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_dec : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_cond_sub_u32 : AMDGPUStructBufferAtomic;
+def int_amdgcn_struct_buffer_atomic_sub_clamp_u32 : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_cmpswap : Intrinsic<
[llvm_anyint_ty],
[LLVMMatchType<0>, // src(VGPR)
@@ -1817,6 +1820,7 @@ def int_amdgcn_struct_ptr_buffer_atomic_xor : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_inc : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_dec : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_cond_sub_u32 : AMDGPUStructPtrBufferAtomic;
+def int_amdgcn_struct_ptr_buffer_atomic_sub_clamp_u32 : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_cmpswap : Intrinsic<
[llvm_anyint_ty],
[LLVMMatchType<0>, // src(VGPR)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index ed8ae2b16c5d4..b201c85cffc9f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -2596,6 +2596,10 @@ def HasAtomicFMinFMaxF64FlatInsts :
Predicate<"Subtarget->hasAtomicFMinFMaxF64FlatInsts()">,
AssemblerPredicate<(any_of FeatureAtomicFMinFMaxF64FlatInsts)>;
+def HasAtomicCondSubClampFlatInsts :
+ Predicate<"Subtarget->getGeneration() >= AMDGPUSubtarget::GFX12">,
+ AssemblerPredicate<(all_of FeatureGFX12Insts)>;
+
def HasLdsAtomicAddF64 :
Predicate<"Subtarget->hasLdsAtomicAddF64()">,
AssemblerPredicate<(any_of FeatureGFX90AInsts, FeatureGFX1250Insts)>;
@@ -2926,6 +2930,10 @@ def HasFmaLegacy32 : Predicate<"Subtarget->hasGFX10_3Insts()">,
def HasAtomicDsPkAdd16Insts : Predicate<"Subtarget->hasAtomicDsPkAdd16Insts()">,
AssemblerPredicate<(any_of FeatureAtomicDsPkAdd16Insts)>;
+def HasAtomicDsCondSubClampInsts :
+ Predicate<"Subtarget->getGeneration() >= AMDGPUSubtarget::GFX12">,
+ AssemblerPredicate<(all_of FeatureGFX12Insts)>;
+
def HasAtomicFlatPkAdd16Insts : Predicate<"Subtarget->hasAtomicFlatPkAdd16Insts()">,
AssemblerPredicate<(any_of FeatureAtomicFlatPkAdd16Insts)>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index bb4bf742fb861..55ce4f1738e37 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -288,6 +288,8 @@ def : GINodeEquiv<G_AMDGPU_TBUFFER_STORE_FORMAT_D16, SItbuffer_store_d16>;
// FIXME: Check MMO is atomic
def : GINodeEquiv<G_ATOMICRMW_UINC_WRAP, atomic_load_uinc_wrap_glue>;
def : GINodeEquiv<G_ATOMICRMW_UDEC_WRAP, atomic_load_udec_wrap_glue>;
+def : GINodeEquiv<G_ATOMICRMW_USUB_COND, atomic_load_usub_cond_glue>;
+def : GINodeEquiv<G_ATOMICRMW_USUB_SAT, atomic_load_usub_sat_glue>;
def : GINodeEquiv<G_ATOMICRMW_FMIN, atomic_load_fmin_glue>;
def : GINodeEquiv<G_ATOMICRMW_FMAX, atomic_load_fmax_glue>;
@@ -308,6 +310,7 @@ def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_FMIN, SIbuffer_atomic_fmin>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_FMAX, SIbuffer_atomic_fmax>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_CMPSWAP, SIbuffer_atomic_cmpswap>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_COND_SUB_U32, SIbuffer_atomic_cond_sub_u32>;
+def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SUB_CLAMP_U32, SIbuffer_atomic_csub>;
def : GINodeEquiv<G_AMDGPU_S_BUFFER_LOAD, SIsbuffer_load>;
def : GINodeEquiv<G_AMDGPU_S_BUFFER_LOAD_SBYTE, SIsbuffer_load_byte>;
def : GINodeEquiv<G_AMDGPU_S_BUFFER_LOAD_UBYTE, SIsbuffer_load_ubyte>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index c575714cf61cd..15492144ba615 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -4201,6 +4201,8 @@ bool AMDGPUInstructionSelector::select(MachineInstr &I) {
case TargetOpcode::G_ATOMICRMW_UMAX:
case TargetOpcode::G_ATOMICRMW_UINC_WRAP:
case TargetOpcode::G_ATOMICRMW_UDEC_WRAP:
+ case TargetOpcode::G_ATOMICRMW_USUB_COND:
+ case TargetOpcode::G_ATOMICRMW_USUB_SAT:
case TargetOpcode::G_ATOMICRMW_FADD:
case TargetOpcode::G_ATOMICRMW_FMIN:
case TargetOpcode::G_ATOMICRMW_FMAX:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index bd443b5b6f1e6..20758699377fa 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -695,6 +695,8 @@ defm atomic_load_fmin : binary_atomic_op_fp_all_as<atomic_load_fmin>;
defm atomic_load_fmax : binary_atomic_op_fp_all_as<atomic_load_fmax>;
defm atomic_load_uinc_wrap : binary_atomic_op_all_as<atomic_load_uinc_wrap>;
defm atomic_load_udec_wrap : binary_atomic_op_all_as<atomic_load_udec_wrap>;
+defm atomic_load_usub_cond : binary_atomic_op_all_as<atomic_load_usub_cond>;
+defm atomic_load_usub_sat : binary_atomic_op_all_as<atomic_load_usub_sat>;
defm AMDGPUatomic_cmp_swap : binary_atomic_op_all_as<AMDGPUatomic_cmp_swap>;
def load_align8_local : PatFrag<(ops node:$ptr), (load_local node:$ptr)>,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index c5e99a607ce02..6e3a1b6a5563f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1722,6 +1722,13 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
}
+ auto &Atomics32 =
+ getActionDefinitionsBuilder({G_ATOMICRMW_USUB_COND, G_ATOMICRMW_USUB_SAT})
+ .legalFor({{S32, GlobalPtr}, {S32, LocalPtr}, {S32, RegionPtr}});
+ if (ST.hasFlatAddressSpace()) {
+ Atomics32.legalFor({{S32, FlatPtr}});
+ }
+
// TODO: v2bf16 operations, and fat buffer pointer support.
auto &Atomic = getActionDefinitionsBuilder(G_ATOMICRMW_FADD);
if (ST.hasLDSFPAtomicAddF32()) {
@@ -6561,8 +6568,15 @@ static unsigned getBufferAtomicPseudo(Intrinsic::ID IntrID) {
case Intrinsic::amdgcn_struct_buffer_atomic_fmax:
case Intrinsic::amdgcn_struct_ptr_buffer_atomic_fmax:
return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX;
+ case Intrinsic::amdgcn_raw_buffer_atomic_sub_clamp_u32:
+ case Intrinsic::amdgcn_raw_ptr_buffer_atomic_sub_clamp_u32:
+ case Intrinsic::amdgcn_struct_buffer_atomic_sub_clamp_u32:
+ case Intrinsic::amdgcn_struct_ptr_buffer_atomic_sub_clamp_u32:
+ return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB_CLAMP_U32;
case Intrinsic::amdgcn_raw_buffer_atomic_cond_sub_u32:
+ case Intrinsic::amdgcn_raw_ptr_buffer_atomic_cond_sub_u32:
case Intrinsic::amdgcn_struct_buffer_atomic_cond_sub_u32:
+ case Intrinsic::amdgcn_struct_ptr_buffer_atomic_cond_sub_u32:
return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_COND_SUB_U32;
default:
llvm_unreachable("unhandled atomic opcode");
@@ -7970,6 +7984,14 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
case Intrinsic::amdgcn_raw_ptr_buffer_atomic_fmax:
case Intrinsic::amdgcn_struct_buffer_atomic_fmax:
case Intrinsic::amdgcn_struct_ptr_buffer_atomic_fmax:
+ case Intrinsic::amdgcn_raw_buffer_atomic_sub_clamp_u32:
+ case Intrinsic::amdgcn_raw_ptr_buffer_atomic_sub_clamp_u32:
+ case Intrinsic::amdgcn_struct_buffer_atomic_sub_clamp_u32:
+ case Intrinsic::amdgcn_struct_ptr_buffer_atomic_sub_clamp_u32:
+ case Intrinsic::amdgcn_raw_buffer_atomic_cond_sub_u32:
+ case Intrinsic::amdgcn_raw_ptr_buffer_atomic_cond_sub_u32:
+ case Intrinsic::amdgcn_struct_buffer_atomic_cond_sub_u32:
+ case Intrinsic::amdgcn_struct_ptr_buffer_atomic_cond_sub_u32:
case Intrinsic::amdgcn_raw_buffer_atomic_fadd:
case Intrinsic::amdgcn_raw_ptr_buffer_atomic_fadd:
case Intrinsic::amdgcn_struct_buffer_atomic_fadd:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
index e9bcf16025d58..97e7a239e1099 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
@@ -1748,6 +1748,12 @@ Value *SplitPtrStructs::handleMemoryInst(Instruction *I, Value *Arg, Value *Ptr,
case AtomicRMWInst::FMin:
IID = Intrinsic::amdgcn_raw_ptr_buffer_atomic_fmin;
break;
+ case AtomicRMWInst::USubCond:
+ IID = Intrinsic::amdgcn_raw_ptr_buffer_atomic_cond_sub_u32;
+ break;
+ case AtomicRMWInst::USubSat:
+ IID = Intrinsic::amdgcn_raw_ptr_buffer_atomic_sub_clamp_u32;
+ break;
case AtomicRMWInst::FSub: {
reportFatalUsageError(
"atomic floating point subtraction not supported for "
@@ -1773,14 +1779,12 @@ Value *SplitPtrStructs::handleMemoryInst(Instruction *I, Value *Arg, Value *Ptr,
break;
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
- reportFatalUsageError("wrapping increment/decrement not supported for "
- "buffer resources and should've ben expanded away");
+ reportFatalUsageError(
+ "wrapping increment/decrement not supported for "
+ "buffer resources and should've been expanded away");
break;
case AtomicRMWInst::BAD_BINOP:
llvm_unreachable("Not sure how we got a bad binop");
- case AtomicRMWInst::USubCond:
- case AtomicRMWInst::USubSat:
- break;
}
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index a88e4b2a2a31d..a7955ee2dac40 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3111,6 +3111,8 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC:
+ case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB_CLAMP_U32:
+ case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_COND_SUB_U32:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX: {
@@ -4499,6 +4501,8 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC:
+ case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB_CLAMP_U32:
+ case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_COND_SUB_U32:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX: {
@@ -5705,6 +5709,8 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_ATOMICRMW_FMAX:
case AMDGPU::G_ATOMICRMW_UINC_WRAP:
case AMDGPU::G_ATOMICRMW_UDEC_WRAP:
+ case AMDGPU::G_ATOMICRMW_USUB_COND:
+ case AMDGPU::G_ATOMICRMW_USUB_SAT:
case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG: {
OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
OpdsMapping[1] = getValueMappingForPtr(MRI, MI.getOperand(1).getReg());
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index 040a7112d29c3..4b3bd0c09e076 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -890,10 +890,8 @@ multiclass DSAtomicRetNoRetPatIntrinsic_mc<DS_Pseudo inst, DS_Pseudo noRetInst,
ValueType vt, string frag> {
def : DSAtomicRetPat<inst, vt,
!cast<PatFrag>(frag#"_local_addrspace")>;
-
- let OtherPredicates = [HasAtomicCSubNoRtnInsts] in
- def : DSAtomicRetPat<noRetInst, vt,
- !cast<PatFrag>(frag#"_noret_local_addrspace"), /* complexity */ 1>;
+ def : DSAtomicRetPat<noRetInst, vt,
+ !cast<PatFrag>(frag#"_noret_local_addrspace"), /* complexity */ 1>;
}
defm : DSAtomicRetNoRetPatIntrinsic_mc<DS_COND_SUB_RTN_U32, DS_COND_SUB_U32, i32, "int_amdgcn_atomic_cond_sub_u32">;
@@ -1279,6 +1277,14 @@ defm : DSAtomicRetNoRetPat_NoM0_mc<DS_PK_ADD_RTN_F16, DS_PK_ADD_F16, v2f16, "ato
defm : DSAtomicRetNoRetPat_NoM0_mc<DS_PK_ADD_RTN_BF16, DS_PK_ADD_BF16, v2bf16, "atomic_load_fadd">;
}
+let SubtargetPredicate = HasAtomicDsCondSubClampInsts in {
+
+defm : DSAtomicRetNoRetPat_NoM0_mc<DS_COND_SUB_RTN_U32, DS_COND_SUB_U32, i32, "atomic_load_usub_cond">;
+
+defm : DSAtomicRetNoRetPat_NoM0_mc<DS_SUB_CLAMP_RTN_U32, DS_SUB_CLAMP_U32, i32, "atomic_load_usub_sat">;
+
+} // let SubtargetPredicate = HasAtomicDsCondSubClampInsts
+
let SubtargetPredicate = isGFX6GFX7GFX8GFX9GFX10 in {
defm : DSAtomicCmpXChgSwapped_mc<DS_CMPST_RTN_B32, DS_CMPST_B32, i32, "atomic_cmp_swap">;
}
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 4b79ab3c3c1ba..0e60c73aa5db7 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -2179,6 +2179,11 @@ defm : FlatAtomicPat <"FLAT_ATOMIC_MIN_F64", "atomic_load_fmin_"#as, f64>;
defm : FlatAtomicPat <"FLAT_ATOMIC_MAX_F64", "atomic_load_fmax_"#as, f64>;
}
+let SubtargetPredicate = HasAtomicCondSubClampFlatInsts in {
+ defm : FlatAtomicRtnPat<"FLAT_ATOMIC_COND_SUB_U32", "atomic_load_usub_cond_" #as, i32 >;
+
+ defm : FlatAtomicNoRtnPat<"FLAT_ATOMIC_COND_SUB_U32", "atomic_load_usub_cond_"#as, i32>;
+}
} // end foreach as
defm : FlatStorePats <FLAT_STORE_BYTE, truncstorei8_flat, i16>;
@@ -2350,10 +2355,10 @@ defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_OR", "atomic_load_or_global", i32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_SWAP", "atomic_swap_global", i32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_CMPSWAP", "AMDGPUatomic_cmp_swap_global", i32, v2i32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_XOR", "atomic_load_xor_global", i32>;
-defm : GlobalFLATAtomicPatsRtn <"GLOBAL_ATOMIC_CSUB", "int_amdgcn_global_atomic_csub", i32, i32, /* isIntr */ 1>;
+defm : GlobalFLATAtomicPatsRtn <"GLOBAL_ATOMIC_CSUB", "atomic_load_usub_sat_global", i32>;
let SubtargetPredicate = HasAtomicCSubNoRtnInsts in
-defm : GlobalFLATAtomicPatsNoRtn <"GLOBAL_ATOMIC_CSUB", "int_amdgcn_global_atomic_csub", i32, i32, /* isIntr */ 1>;
+defm : GlobalFLATAtomicPatsNoRtn <"GLOBAL_ATOMIC_CSUB", "atomic_load_usub_sat_global", i32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_ADD_X2", "atomic_load_add_global", i64>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_SUB_X2", "atomic_load_sub_global", i64>;
@@ -2370,10 +2375,8 @@ defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_CMPSWAP_X2", "AMDGPUatomic_cmp_swap_
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_XOR_X2", "atomic_load_xor_global", i64>;
let SubtargetPredicate = isGFX12Plus in {
- defm : GlobalFLATAtomicPatsRtnWithAddrSpace <"GLOBAL_ATOMIC_COND_SUB_U32", "int_amdgcn_atomic_cond_sub_u32", "global_addrspace", i32>;
-
- let SubtargetPredicate = HasAtomicCSubNoRtnInsts in
- defm : GlobalFLATAtomicPatsNoRtnWithAddrSpace <"GLOBAL_ATOMIC_COND_SUB_U32", "int_amdgcn_atomic_cond_sub_u32", "global_addrspace", i32>;
+ defm : GlobalFLATAtomicPatsRtn <"GLOBAL_ATOMIC_COND_SUB_U32", "atomic_load_usub_cond_global", i32>;
+ defm : GlobalFLATAtomicPatsNoRtn <"GLOBAL_ATOMIC_COND_SUB_U32", "atomic_load_usub_cond_global", i32>;
}
let OtherPredicates = [isGFX12PlusNot12_50] in
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index 34eb8b2266311..ddff3ad0244f9 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -1623,6 +1623,10 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
return hasKernargPreload() && !GFX1250Insts;
}
+ bool hasCondSubInsts() const { return GFX12Insts; }
+
+ bool hasSubClampInsts() const { return hasGFX10_3Insts(); }
+
/// \returns SGPR allocation granularity supported by the subtarget.
unsigned getSGPRAllocGranule() const {
return AMDGPU::IsaInfo::getSGPRAllocGranule(this);
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index 950a9d8649c93..3c4f115d7251a 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -2185,6 +2185,8 @@ R600TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
case AtomicRMWInst::FSub:
case AtomicRMWInst::FMax:
case AtomicRMWInst::FMin:
+ case AtomicRMWInst::USubCond:
+ case AtomicRMWInst::USubSat:
return AtomicExpansionKind::CmpXChg;
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 301f2fc8dab45..c95f53413d445 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1059,6 +1059,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
ISD::ATOMIC_LOAD_FMAX,
ISD::ATOMIC_LOAD_UINC_WRAP,
ISD::ATOMIC_LOAD_UDEC_WRAP,
+ ISD::ATOMIC_LOAD_USUB_COND,
+ ISD::ATOMIC_LOAD_USUB_SAT,
ISD::INTRINSIC_VOID,
ISD::INTRINSIC_W_CHAIN});
@@ -10579,9 +10581,6 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
case Intrinsic::amdgcn_raw_buffer_atomic_dec:
case Intrinsic::amdgcn_raw_ptr_buffer_atomic_dec:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC);
- case Intrinsic::amdgcn_raw_buffer_atomic_cond_sub_u32:
- return lowerRawBufferAtomicIntrin(Op, DAG,
- AMDGPUISD::BUFFER_ATOMIC_COND_SUB_U32);
case Intrinsic::amdgcn_struct_buffer_atomic_swap:
case Intrinsic::amdgcn_struct_ptr_buffer_atomic_swap:
return lowerStructBufferAtomicIntrin(Op, DAG,
@@ -10623,10 +10622,21 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
case Intrinsic::amdgcn_struct_buffer_atomic_dec:
case Intrinsic::amdgcn_struct_ptr_buffer_atomic_dec:
return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC);
+ case Intrinsic::amdgcn_raw_buffer_atomic_sub_clamp_u32:
+ case Intrinsic::amdgcn_raw_ptr_buffer_atomic_sub_clamp_u32:
+ return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_CSUB);
+ case Intrinsic::amdgcn_struct_buffer_atomic_sub_clamp_u32:
+ case Intrinsic::amdgcn_struct_ptr_buffer_atomic_sub_clamp_u32:
+ return lowerStructBufferAtomicIntrin(Op, DAG,
+ AMDGPUISD::BUFFER_ATOMIC_CSUB);
+ case Intrinsic::amdgcn_raw_buffer_atomic_cond_sub_u32:
+ case Intrinsic::amdgcn_raw_ptr_buffer_atomic_cond_sub_u32:
+ return lowerRawBufferAtomicIntrin(Op, DAG,
+ AMDGPUISD::BUFFER_ATOMIC_COND_SUB_U32);
case Intrinsic::amdgcn_struct_buffer_atomic_cond_sub_u32:
+ case Intrinsic::amdgcn_struct_ptr_buffer_atomic_cond_sub_u32:
return lowerStructBufferAtomicIntrin(Op, DAG,
AMDGPUISD::BUFFER_ATOMIC_COND_SUB_U32);
-
case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap:
case Intrinsic::amdgcn_raw_ptr_buffer_atomic_cmpswap: {
SDValue Rsrc = bufferRsrcPtrToVector(Op.getOperand(4), DAG);
@@ -18591,7 +18601,19 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
case AtomicRMWInst::UMax:
case AtomicRMWInst::UMin:
case AtomicRMWInst::UIncWrap:
- case AtomicRMWInst::UDecWrap: {
+ case AtomicRMWInst::UDecWrap:
+ case AtomicRMWInst::USubCond:
+ case AtomicRMWInst::USubSat: {
+ if (Op == AtomicRMWInst::USubCond && !Subtarget->hasCondSubInsts())
+ return AtomicExpansionKind::CmpXChg;
+ if (Op == AtomicRMWInst::USubSat && !Subtarget->hasSubClampInsts())
+ return AtomicExpansionKind::CmpXChg;
+ if (Op == AtomicRMWInst::USubCond || Op == AtomicRMWInst::USubSat) {
+ auto *IT = dyn_cast<IntegerType>(RMW->getType());
+ if (!IT || IT->getBitWidth() != 32)
+ return AtomicExpansionKind::CmpXChg;
+ }
+
if (AMDGPU::isFlatGlobalAddrSpace(AS) ||
AS == AMDGPUAS::BUFFER_FAT_POINTER) {
if (Subtarget->hasEmulatedSystemScopeAtomics())
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 526250a04e001..628b972f97086 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -809,6 +809,8 @@ defm atomic_load_add : SIAtomicM0Glue2 <"LOAD_ADD">;
defm atomic_load_sub : SIAtomicM0Glue2 <"LOAD_SUB">;
defm atomic_load_uinc_wrap : SIAtomicM0Glue2 <"LOAD_UINC_WRAP">;
defm atomic_load_udec_wrap : SIAtomicM0Glue2 <"LOAD_UDEC_WRAP">;
+defm atomic_load_usub_cond : SIAtomicM0Glue2 <"LOAD_USUB_COND">;
+defm atomic_load_usub_sat : SIAtomicM0Glue2 <"LOAD_USUB_SAT">;
defm atomic_load_and : SIAtomicM0Glue2 <"LOAD_AND">;
defm atomic_load_min : SIAtomicM0Glue2 <"LOAD_MIN">;
defm atomic_load_max : SIAtomicM0Glue2 <"LOAD_MAX">;
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index ca5a4d7301bda..2d1d6261948ad 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -4559,6 +4559,7 @@ def G_AMDGPU_BUFFER_ATOMIC_SMAX : BufferAtomicGenericInstruction;
def G_AMDGPU_BUFFER_ATOMIC_UMAX : BufferAtomicGenericInstruction;
def G_AMDGPU_BUFFER_ATOMIC_AND : BufferAtomicGenericInstruction;
def G_AMDGPU_BUFFER_ATOMIC_COND_SUB_U32 : BufferAtomicGenericInstruction;
+def G_AMDGPU_BUFFER_ATOMIC_SUB_CLAMP_U32 : BufferAtomicGenericInstruction;
def G_AMDGPU_BUFFER_ATOMIC_OR : BufferAtomicGenericInstruction;
def G_AMDGPU_BUFFER_ATOMIC_XOR : BufferAtomicGenericInstruction;
def G_AMDGPU_BUFFER_ATOMIC_INC : BufferAtomicGenericInstruction;
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index bcfdb2ca5a3da..c6e061f368aef 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -729,6 +729,8 @@ bool isGenericAtomic(unsigned Opc) {
Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN ||
Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX ||
Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP ||
+ Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB_CLAMP_U32 ||
+ Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_COND_SUB_U32 ||
Opc == AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG;
}
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/atomics-gmir.mir b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/atomics-gmir.mir
index 6729faf233c35..d7862e8373121 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/atomics-gmir.mir
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/atomics-gmir.mir
@@ -82,6 +82,12 @@ body: |
; CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_ATOMICRMW_UDEC_WRAP
%20:_(s32) = G_ATOMICRMW_UDEC_WRAP %1, %5
+ ; CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_ATOMICRMW_USUB_COND
+ %21:_(s32) = G_ATOMICRMW_USUB_COND %1, %5
+
+ ; CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_ATOMICRMW_USUB_SAT
+ %22:_(s32) = G_ATOMICRMW_USUB_SAT %1, %5
+
$vgpr0 = COPY %4(s32)
SI_RETURN implicit $vgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.global.atomic.csub.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.global.atomic.csub.ll
index 70bfb2ed70ebd..bff4771319454 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.global.atomic.csub.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.global.atomic.csub.ll
@@ -8,15 +8,21 @@ define i32 @global_atomic_csub(ptr addrspace(1) %ptr, i32 %data) {
; GFX10-LABEL: global_atomic_csub:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: global_atomic_csub:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: global_atomic_csub:
@@ -26,10 +32,14 @@ define i32 @global_atomic_csub(ptr addrspace(1) %ptr, i32 %data) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN
+; GFX12-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: s_setpc_b64 s[30:31]
- %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %ptr, i32 %data)
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data seq_cst, !amdgpu.no.remote.memory !0
ret i32 %ret
}
@@ -39,8 +49,11 @@ define i32 @global_atomic_csub_offset(ptr addrspace(1) %ptr, i32 %data) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
; GFX10-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: global_atomic_csub_offset:
@@ -49,8 +62,11 @@ define i32 @global_atomic_csub_offset(ptr addrspace(1) %ptr, i32 %data) {
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: global_atomic_csub_offset:
@@ -60,11 +76,15 @@ define i32 @global_atomic_csub_offset(ptr addrspace(1) %ptr, i32 %data) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep, i32 %data)
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i32 %data seq_cst, !amdgpu.no.remote.memory !0
ret i32 %ret
}
@@ -72,15 +92,21 @@ define void @global_atomic_csub_nortn(ptr addrspace(1) %ptr, i32 %data) {
; GFX10-LABEL: global_atomic_csub_nortn:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: global_atomic_csub_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: global_atomic_csub_nortn:
@@ -90,10 +116,14 @@ define void @global_atomic_csub_nortn(ptr addrspace(1) %ptr, i32 %data) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN
+; GFX12-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: s_setpc_b64 s[30:31]
- %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %ptr, i32 %data)
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -103,8 +133,11 @@ define void @global_atomic_csub_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
; GFX10-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: global_atomic_csub_offset_nortn:
@@ -113,8 +146,11 @@ define void @global_atomic_csub_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: global_atomic_csub_offset_nortn:
@@ -124,11 +160,15 @@ define void @global_atomic_csub_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep, i32 %data)
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i32 %data seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -143,6 +183,8 @@ define amdgpu_kernel void @global_atomic_csub_sgpr_base_offset(ptr addrspace(1)
; GFX10-NEXT: v_mov_b32_e32 v0, s2
; GFX10-NEXT: global_atomic_csub v0, v1, v0, s[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dword v[0:1], v0, off
; GFX10-NEXT: s_endpgm
;
@@ -155,6 +197,8 @@ define amdgpu_kernel void @global_atomic_csub_sgpr_base_offset(ptr addrspace(1)
; GFX11-NEXT: v_dual_mov_b32 v1, 0x1000 :: v_dual_mov_b32 v0, s2
; GFX11-NEXT: global_atomic_csub_u32 v0, v1, v0, s[0:1] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b32 v[0:1], v0, off
; GFX11-NEXT: s_endpgm
;
@@ -163,12 +207,15 @@ define amdgpu_kernel void @global_atomic_csub_sgpr_base_offset(ptr addrspace(1)
; GFX12-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v1, v0, s[0:1] offset:4096 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v1, v0, s[0:1] offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: global_store_b32 v[0:1], v0, off
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep, i32 %data)
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i32 %data seq_cst, !amdgpu.no.remote.memory !0
store i32 %ret, ptr addrspace(1) poison
ret void
}
@@ -183,6 +230,9 @@ define amdgpu_kernel void @global_atomic_csub_sgpr_base_offset_nortn(ptr addrspa
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v0, s2
; GFX10-NEXT: global_atomic_csub v0, v1, v0, s[0:1] glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_csub_sgpr_base_offset_nortn:
@@ -193,6 +243,9 @@ define amdgpu_kernel void @global_atomic_csub_sgpr_base_offset_nortn(ptr addrspa
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v1, 0x1000 :: v_dual_mov_b32 v0, s2
; GFX11-NEXT: global_atomic_csub_u32 v0, v1, v0, s[0:1] glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
;
; GFX12-LABEL: global_atomic_csub_sgpr_base_offset_nortn:
@@ -200,14 +253,18 @@ define amdgpu_kernel void @global_atomic_csub_sgpr_base_offset_nortn(ptr addrspa
; GFX12-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v1, v0, s[0:1] offset:4096 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v1, v0, s[0:1] offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep, i32 %data)
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i32 %data seq_cst, !amdgpu.no.remote.memory !0
ret void
}
-declare i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) nocapture, i32) #1
-
attributes #0 = { nounwind willreturn }
attributes #1 = { argmemonly nounwind }
+
+!0 = !{}
diff --git a/llvm/test/CodeGen/AMDGPU/atomicrmw_usub_cond.ll b/llvm/test/CodeGen/AMDGPU/atomicrmw_usub_cond.ll
new file mode 100644
index 0000000000000..ce8ffab77ac85
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/atomicrmw_usub_cond.ll
@@ -0,0 +1,1315 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9-SDAG %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9-GISEL %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12-GISEL %s
+
+define amdgpu_kernel void @flat_atomic_usub_cond_no_rtn_u32(ptr %addr, i32 %in) {
+; GFX9-SDAG-LABEL: flat_atomic_usub_cond_no_rtn_u32:
+; GFX9-SDAG: ; %bb.0: ; %entry
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_load_dword s2, s[4:5], 0x2c
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: s_add_u32 s0, s0, -16
+; GFX9-SDAG-NEXT: s_addc_u32 s1, s1, -1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-SDAG-NEXT: flat_load_dword v3, v[0:1]
+; GFX9-SDAG-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-SDAG-NEXT: .LBB0_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_subrev_u32_e32 v2, s2, v3
+; GFX9-SDAG-NEXT: v_cmp_le_u32_e32 vcc, s2, v3
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-SDAG-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX9-SDAG-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB0_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: flat_atomic_usub_cond_no_rtn_u32:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], -16
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v2, s2
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-SDAG-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: flat_atomic_cond_sub_u32 v[0:1], v2 scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX9-GISEL-LABEL: flat_atomic_usub_cond_no_rtn_u32:
+; GFX9-GISEL: ; %bb.0: ; %entry
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-GISEL-NEXT: s_load_dword s2, s[4:5], 0x2c
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: s_add_u32 s0, s0, -16
+; GFX9-GISEL-NEXT: s_addc_u32 s1, s1, -1
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-GISEL-NEXT: flat_load_dword v3, v[0:1]
+; GFX9-GISEL-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-GISEL-NEXT: .LBB0_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_subrev_u32_e32 v2, s2, v3
+; GFX9-GISEL-NEXT: v_cmp_le_u32_e32 vcc, s2, v3
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-GISEL-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX9-GISEL-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB0_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: flat_atomic_usub_cond_no_rtn_u32:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_add_co_u32 s0, s0, -16
+; GFX12-GISEL-NEXT: s_add_co_ci_u32 s1, s1, -1
+; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX12-GISEL-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: flat_atomic_cond_sub_u32 v[0:1], v2 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, ptr %addr, i32 -4
+ %unused = atomicrmw usub_cond ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
+ ret void
+}
+
+define amdgpu_kernel void @flat_atomic_usub_cond_rtn_u32(ptr %addr, i32 %in, ptr %use) {
+; GFX9-SDAG-LABEL: flat_atomic_usub_cond_rtn_u32:
+; GFX9-SDAG: ; %bb.0: ; %entry
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: s_add_u32 s2, s0, 16
+; GFX9-SDAG-NEXT: s_addc_u32 s3, s1, 0
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-SDAG-NEXT: flat_load_dword v0, v[0:1]
+; GFX9-SDAG-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-SDAG-NEXT: .LBB1_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-SDAG-NEXT: s_add_u32 s8, s0, 16
+; GFX9-SDAG-NEXT: v_subrev_u32_e32 v0, s6, v1
+; GFX9-SDAG-NEXT: s_addc_u32 s9, s1, 0
+; GFX9-SDAG-NEXT: v_cmp_le_u32_e32 vcc, s6, v1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v3, s9
+; GFX9-SDAG-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-SDAG-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB1_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-SDAG-NEXT: flat_store_dword v[1:2], v0
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: flat_atomic_usub_cond_rtn_u32:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_clause 0x1
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 16
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v2, s2
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-SDAG-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: flat_atomic_cond_sub_u32 v2, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX12-SDAG-NEXT: flat_store_b32 v[0:1], v2
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX9-GISEL-LABEL: flat_atomic_usub_cond_rtn_u32:
+; GFX9-GISEL: ; %bb.0: ; %entry
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-GISEL-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: s_add_u32 s2, s0, 16
+; GFX9-GISEL-NEXT: s_addc_u32 s3, s1, 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-GISEL-NEXT: flat_load_dword v0, v[0:1]
+; GFX9-GISEL-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-GISEL-NEXT: .LBB1_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-GISEL-NEXT: s_add_u32 s8, s0, 16
+; GFX9-GISEL-NEXT: v_subrev_u32_e32 v0, s6, v1
+; GFX9-GISEL-NEXT: s_addc_u32 s9, s1, 0
+; GFX9-GISEL-NEXT: v_cmp_le_u32_e32 vcc, s6, v1
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v3, s9
+; GFX9-GISEL-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-GISEL-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB1_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-GISEL-NEXT: flat_store_dword v[1:2], v0
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: flat_atomic_usub_cond_rtn_u32:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_clause 0x1
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_add_co_u32 s0, s0, 16
+; GFX12-GISEL-NEXT: s_add_co_ci_u32 s1, s1, 0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX12-GISEL-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: flat_atomic_cond_sub_u32 v2, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX12-GISEL-NEXT: flat_store_b32 v[0:1], v2
+; GFX12-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, ptr %addr, i32 4
+ %val = atomicrmw usub_cond ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
+ store i32 %val, ptr %use
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_usub_cond_no_rtn_u32(ptr addrspace(1) %addr, i32 %in) {
+; GFX9-SDAG-LABEL: global_atomic_usub_cond_no_rtn_u32:
+; GFX9-SDAG: ; %bb.0: ; %entry
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: s_add_u32 s2, s0, -16
+; GFX9-SDAG-NEXT: s_addc_u32 s3, s1, -1
+; GFX9-SDAG-NEXT: s_load_dword s4, s[2:3], 0x0
+; GFX9-SDAG-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-SDAG-NEXT: .LBB2_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: v_subrev_u32_e32 v0, s6, v1
+; GFX9-SDAG-NEXT: v_cmp_le_u32_e32 vcc, s6, v1
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:-16 glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-SDAG-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB2_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_cond_no_rtn_u32:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX12-SDAG-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v0, v1, s[0:1] offset:-16 scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX9-GISEL-LABEL: global_atomic_usub_cond_no_rtn_u32:
+; GFX9-GISEL: ; %bb.0: ; %entry
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-GISEL-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: s_add_u32 s2, s0, -16
+; GFX9-GISEL-NEXT: s_addc_u32 s3, s1, -1
+; GFX9-GISEL-NEXT: s_load_dword s4, s[2:3], 0x0
+; GFX9-GISEL-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-GISEL-NEXT: .LBB2_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: v_subrev_u32_e32 v0, s6, v1
+; GFX9-GISEL-NEXT: v_cmp_le_u32_e32 vcc, s6, v1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:-16 glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-GISEL-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB2_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_cond_no_rtn_u32:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
+; GFX12-GISEL-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v1, v0, s[0:1] offset:-16 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, ptr addrspace(1) %addr, i32 -4
+ %unused = atomicrmw usub_cond ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_usub_cond_rtn_u32(ptr addrspace(1) %addr, i32 %in, ptr addrspace(1) %use) {
+; GFX9-SDAG-LABEL: global_atomic_usub_cond_rtn_u32:
+; GFX9-SDAG: ; %bb.0: ; %entry
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX9-SDAG-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: s_load_dword s7, s[0:1], 0x10
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-SDAG-NEXT: .LBB3_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-SDAG-NEXT: v_subrev_u32_e32 v1, s6, v2
+; GFX9-SDAG-NEXT: v_cmp_le_u32_e32 vcc, s6, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[0:1] offset:16 glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX9-SDAG-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB3_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_cond_rtn_u32:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-SDAG-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v1, v0, v1, s[0:1] offset:16 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[4:5]
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX9-GISEL-LABEL: global_atomic_usub_cond_rtn_u32:
+; GFX9-GISEL: ; %bb.0: ; %entry
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-GISEL-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX9-GISEL-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: s_load_dword s7, s[0:1], 0x10
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-GISEL-NEXT: .LBB3_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-GISEL-NEXT: v_subrev_u32_e32 v1, s6, v2
+; GFX9-GISEL-NEXT: v_cmp_le_u32_e32 vcc, s6, v2
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[0:1] offset:16 glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX9-GISEL-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB3_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_cond_rtn_u32:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_clause 0x1
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
+; GFX12-GISEL-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v0, v1, v0, s[0:1] offset:16 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: global_store_b32 v1, v0, s[4:5]
+; GFX12-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, ptr addrspace(1) %addr, i32 4
+ %val = atomicrmw usub_cond ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
+ store i32 %val, ptr addrspace(1) %use
+ ret void
+}
+
+define amdgpu_kernel void @ds_usub_cond_no_rtn_u32(ptr addrspace(3) %addr, i32 %in) {
+; GFX9-SDAG-LABEL: ds_usub_cond_no_rtn_u32:
+; GFX9-SDAG: ; %bb.0: ; %entry
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: s_add_i32 s0, s0, -16
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-SDAG-NEXT: ds_read_b32 v1, v0
+; GFX9-SDAG-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_subrev_u32_e32 v2, s1, v1
+; GFX9-SDAG-NEXT: v_cmp_le_u32_e32 vcc, s1, v1
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc
+; GFX9-SDAG-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX9-SDAG-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB4_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: ds_usub_cond_no_rtn_u32:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_add_co_i32 s0, s0, -16
+; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
+; GFX12-SDAG-NEXT: ds_cond_sub_u32 v0, v1
+; GFX12-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SE
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX9-GISEL-LABEL: ds_usub_cond_no_rtn_u32:
+; GFX9-GISEL: ; %bb.0: ; %entry
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-GISEL-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: s_add_u32 s0, s0, -16
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-GISEL-NEXT: ds_read_b32 v1, v0
+; GFX9-GISEL-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_subrev_u32_e32 v2, s1, v1
+; GFX9-GISEL-NEXT: v_cmp_le_u32_e32 vcc, s1, v1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc
+; GFX9-GISEL-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX9-GISEL-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB4_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: ds_usub_cond_no_rtn_u32:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_add_co_u32 s0, s0, -16
+; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
+; GFX12-GISEL-NEXT: ds_cond_sub_u32 v0, v1
+; GFX12-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SE
+; GFX12-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, ptr addrspace(3) %addr, i32 -4
+ %unused = atomicrmw usub_cond ptr addrspace(3) %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @ds_usub_cond_rtn_u32(ptr addrspace(3) %addr, i32 %in, ptr addrspace(3) %use) {
+; GFX9-SDAG-LABEL: ds_usub_cond_rtn_u32:
+; GFX9-SDAG: ; %bb.0: ; %entry
+; GFX9-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-SDAG-NEXT: ds_read_b32 v1, v0 offset:16
+; GFX9-SDAG-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-SDAG-NEXT: v_subrev_u32_e32 v1, s1, v2
+; GFX9-SDAG-NEXT: v_cmp_le_u32_e32 vcc, s1, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-SDAG-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:16
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB5_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-SDAG-NEXT: ds_write_b32 v0, v1
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: ds_usub_cond_rtn_u32:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-SDAG-NEXT: ds_cond_sub_rtn_u32 v0, v0, v1 offset:16
+; GFX12-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SE
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-SDAG-NEXT: ds_store_b32 v1, v0
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX9-GISEL-LABEL: ds_usub_cond_rtn_u32:
+; GFX9-GISEL: ; %bb.0: ; %entry
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-GISEL-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX9-GISEL-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-GISEL-NEXT: ds_read_b32 v1, v0 offset:16
+; GFX9-GISEL-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-GISEL-NEXT: v_subrev_u32_e32 v1, s1, v2
+; GFX9-GISEL-NEXT: v_cmp_le_u32_e32 vcc, s1, v2
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-GISEL-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:16
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX9-GISEL-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB5_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-GISEL-NEXT: ds_write_b32 v0, v1
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: ds_usub_cond_rtn_u32:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v0, s1 :: v_dual_mov_b32 v1, s0
+; GFX12-GISEL-NEXT: ds_cond_sub_rtn_u32 v0, v1, v0 offset:16
+; GFX12-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SE
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-GISEL-NEXT: ds_store_b32 v1, v0
+; GFX12-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, ptr addrspace(3) %addr, i32 4
+ %val = atomicrmw usub_cond ptr addrspace(3) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
+ store i32 %val, ptr addrspace(3) %use
+ ret void
+}
+
+define i32 @global_atomic_usub_cond(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-SDAG-LABEL: global_atomic_usub_cond:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: v_sub_u32_e32 v3, v4, v2
+; GFX9-SDAG-NEXT: v_cmp_ge_u32_e32 vcc, v4, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB6_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_cond:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-GISEL-LABEL: global_atomic_usub_cond:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: v_sub_u32_e32 v3, v4, v2
+; GFX9-GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v4, v2
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB6_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_cond:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
+ ret i32 %ret
+}
+
+define i32 @global_atomic_usub_cond_offset(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-SDAG-LABEL: global_atomic_usub_cond_offset:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_add_co_u32_e32 v3, vcc, 0x1000, v0
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], vcc
+; GFX9-SDAG-NEXT: v_addc_co_u32_e64 v4, s[4:5], 0, v1, s[4:5]
+; GFX9-SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: global_load_dword v0, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-SDAG-NEXT: v_sub_u32_e32 v0, v1, v2
+; GFX9-SDAG-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v0, v[3:4], v[0:1], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_cond_offset:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-GISEL-LABEL: global_atomic_usub_cond_offset:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_add_co_u32_e32 v3, vcc, 0x1000, v0
+; GFX9-GISEL-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v1, vcc
+; GFX9-GISEL-NEXT: global_load_dword v0, v[3:4], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-GISEL-NEXT: v_sub_u32_e32 v0, v1, v2
+; GFX9-GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v0, v[3:4], v[0:1], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_cond_offset:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_cond ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ ret i32 %ret
+}
+
+define void @global_atomic_usub_cond_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-SDAG-LABEL: global_atomic_usub_cond_nortn:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v4, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_sub_u32_e32 v3, v4, v2
+; GFX9-SDAG-NEXT: v_cmp_ge_u32_e32 vcc, v4, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB8_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_cond_nortn:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v[0:1], v2, off scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-GISEL-LABEL: global_atomic_usub_cond_nortn:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v4, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_sub_u32_e32 v3, v4, v2
+; GFX9-GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v4, v2
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB8_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_cond_nortn:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v[0:1], v2, off scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
+ ret void
+}
+
+define void @global_atomic_usub_cond_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-SDAG-LABEL: global_atomic_usub_cond_offset_nortn:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_add_co_u32_e32 v3, vcc, 0x1000, v0
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], vcc
+; GFX9-SDAG-NEXT: v_addc_co_u32_e64 v4, s[4:5], 0, v1, s[4:5]
+; GFX9-SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: global_load_dword v1, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_sub_u32_e32 v0, v1, v2
+; GFX9-SDAG-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v0, v[3:4], v[0:1], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_cond_offset_nortn:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v[0:1], v2, off offset:4096 scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-GISEL-LABEL: global_atomic_usub_cond_offset_nortn:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0x1000, v0
+; GFX9-GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-GISEL-NEXT: global_load_dword v4, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_sub_u32_e32 v3, v4, v2
+; GFX9-GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v4, v2
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_cond_offset_nortn:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v[0:1], v2, off offset:4096 scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_cond ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_usub_cond_sgpr_base_offset(ptr addrspace(1) %ptr, i32 %data, ptr addrspace(1) %dst) {
+; GFX9-SDAG-LABEL: global_atomic_usub_cond_sgpr_base_offset:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX9-SDAG-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: s_load_dword s7, s[2:3], 0x1000
+; GFX9-SDAG-NEXT: s_add_u32 s2, s2, 0x1000
+; GFX9-SDAG-NEXT: s_addc_u32 s3, s3, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-SDAG-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-SDAG-NEXT: v_subrev_u32_e32 v1, s6, v2
+; GFX9-SDAG-NEXT: v_cmp_le_u32_e32 vcc, s6, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[2:3] glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX9-SDAG-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_cond_sgpr_base_offset:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v1, v0, v1, s[0:1] offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[4:5]
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX9-GISEL-LABEL: global_atomic_usub_cond_sgpr_base_offset:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-GISEL-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX9-GISEL-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, 0x1000
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: s_load_dword s7, s[0:1], 0x1000
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-GISEL-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-GISEL-NEXT: v_subrev_u32_e32 v1, s6, v2
+; GFX9-GISEL-NEXT: v_cmp_le_u32_e32 vcc, s6, v2
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[0:1] glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX9-GISEL-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_cond_sgpr_base_offset:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_clause 0x1
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
+; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v0, v1, v0, s[0:1] offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: global_store_b32 v1, v0, s[4:5]
+; GFX12-GISEL-NEXT: s_endpgm
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_cond ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ store i32 %ret, ptr addrspace(1) %dst
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_usub_cond_sgpr_base_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-SDAG-LABEL: global_atomic_usub_cond_sgpr_base_offset_nortn:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX9-SDAG-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: s_load_dword s4, s[2:3], 0x1000
+; GFX9-SDAG-NEXT: s_add_u32 s2, s2, 0x1000
+; GFX9-SDAG-NEXT: s_addc_u32 s3, s3, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-SDAG-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: v_subrev_u32_e32 v0, s6, v1
+; GFX9-SDAG-NEXT: v_cmp_le_u32_e32 vcc, s6, v1
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-SDAG-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_cond_sgpr_base_offset_nortn:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v0, v1, s[0:1] offset:4096 scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX9-GISEL-LABEL: global_atomic_usub_cond_sgpr_base_offset_nortn:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-GISEL-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX9-GISEL-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, 0x1000
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: s_load_dword s4, s[0:1], 0x1000
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-GISEL-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: v_subrev_u32_e32 v0, s6, v1
+; GFX9-GISEL-NEXT: v_cmp_le_u32_e32 vcc, s6, v1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-GISEL-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_cond_sgpr_base_offset_nortn:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
+; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v1, v0, s[0:1] offset:4096 scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_endpgm
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_cond ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ ret void
+}
+
+define i32 @global_atomic_usub_cond__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-SDAG-LABEL: global_atomic_usub_cond__amdgpu_no_remote_memory:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: v_sub_u32_e32 v3, v4, v2
+; GFX9-SDAG-NEXT: v_cmp_ge_u32_e32 vcc, v4, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_cond__amdgpu_no_remote_memory:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-GISEL-LABEL: global_atomic_usub_cond__amdgpu_no_remote_memory:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: v_sub_u32_e32 v3, v4, v2
+; GFX9-GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v4, v2
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_cond__amdgpu_no_remote_memory:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret i32 %ret
+}
+
+define i32 @global_atomic_usub_cond__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-SDAG-LABEL: global_atomic_usub_cond__amdgpu_no_fine_grained_memory:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: v_sub_u32_e32 v3, v4, v2
+; GFX9-SDAG-NEXT: v_cmp_ge_u32_e32 vcc, v4, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB13_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_cond__amdgpu_no_fine_grained_memory:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-GISEL-LABEL: global_atomic_usub_cond__amdgpu_no_fine_grained_memory:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: v_sub_u32_e32 v3, v4, v2
+; GFX9-GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v4, v2
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB13_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_cond__amdgpu_no_fine_grained_memory:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
+ ret i32 %ret
+}
+
+define i32 @global_atomic_usub_cond__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-SDAG-LABEL: global_atomic_usub_cond__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: v_sub_u32_e32 v3, v4, v2
+; GFX9-SDAG-NEXT: v_cmp_ge_u32_e32 vcc, v4, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_cond__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-GISEL-LABEL: global_atomic_usub_cond__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: v_sub_u32_e32 v3, v4, v2
+; GFX9-GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v4, v2
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_cond__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i32 %ret
+}
+
+attributes #0 = { nounwind willreturn }
+attributes #1 = { argmemonly nounwind }
+
+!0 = !{}
diff --git a/llvm/test/CodeGen/AMDGPU/atomicrmw_usub_sat.ll b/llvm/test/CodeGen/AMDGPU/atomicrmw_usub_sat.ll
new file mode 100644
index 0000000000000..708556aae76fe
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/atomicrmw_usub_sat.ll
@@ -0,0 +1,4091 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s -check-prefix=GFX9-GISEL
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 < %s | FileCheck %s -check-prefix=GFX10-GISEL
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck %s -check-prefix=GFX11-GISEL
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 < %s | FileCheck %s -check-prefix=GFX12-GISEL
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s -check-prefix=GFX9-SDAG
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 < %s | FileCheck %s -check-prefix=GFX10-SDAG
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck %s -check-prefix=GFX11-SDAG
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 < %s | FileCheck %s -check-prefix=GFX12-SDAG
+
+define i32 @global_atomic_usub_sat(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB0_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: v_sub_u32_e64 v3, v4, v2 clamp
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB0_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB0_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: v_sub_u32_e64 v3, v4, v2 clamp
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB0_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret i32 %ret
+}
+
+define i32 @global_atomic_usub_sat_offset(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_offset:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_add_co_u32_e32 v3, vcc, 0x1000, v0
+; GFX9-GISEL-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v1, vcc
+; GFX9-GISEL-NEXT: global_load_dword v0, v[3:4], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB1_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-GISEL-NEXT: v_sub_u32_e64 v0, v1, v2 clamp
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v0, v[3:4], v[0:1], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB1_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_offset:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
+; GFX10-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_offset:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_offset:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_offset:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_add_co_u32_e32 v3, vcc, 0x1000, v0
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], vcc
+; GFX9-SDAG-NEXT: v_addc_co_u32_e64 v4, s[4:5], 0, v1, s[4:5]
+; GFX9-SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: global_load_dword v0, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB1_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-SDAG-NEXT: v_sub_u32_e64 v0, v1, v2 clamp
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v0, v[3:4], v[0:1], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB1_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_offset:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
+; GFX10-SDAG-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_offset:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_offset:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret i32 %ret
+}
+
+define void @global_atomic_usub_sat_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_nortn:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v4, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB2_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_sub_u32_e64 v3, v4, v2 clamp
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB2_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_nortn:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_nortn:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_nortn:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_nortn:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v4, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB2_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_sub_u32_e64 v3, v4, v2 clamp
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB2_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_nortn:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_nortn:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_nortn:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret void
+}
+
+define void @global_atomic_usub_sat_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_offset_nortn:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0x1000, v0
+; GFX9-GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-GISEL-NEXT: global_load_dword v4, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB3_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_sub_u32_e64 v3, v4, v2 clamp
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB3_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_offset_nortn:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
+; GFX10-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_offset_nortn:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_offset_nortn:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_offset_nortn:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_add_co_u32_e32 v3, vcc, 0x1000, v0
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], vcc
+; GFX9-SDAG-NEXT: v_addc_co_u32_e64 v4, s[4:5], 0, v1, s[4:5]
+; GFX9-SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: global_load_dword v1, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB3_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_sub_u32_e64 v0, v1, v2 clamp
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v0, v[3:4], v[0:1], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB3_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_offset_nortn:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
+; GFX10-SDAG-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_offset_nortn:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_offset_nortn:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_usub_sat_sgpr_base_offset(ptr addrspace(1) %ptr, i32 %data, ptr addrspace(1) %dst) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX9-GISEL-NEXT: s_load_dword s4, s[8:9], 0x8
+; GFX9-GISEL-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, 0x1000
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: s_load_dword s5, s[0:1], 0x1000
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-GISEL-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-GISEL-NEXT: v_sub_u32_e64 v1, v2, s4 clamp
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[0:1] glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX9-GISEL-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB4_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x10
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_clause 0x2
+; GFX10-GISEL-NEXT: s_load_dword s4, s[8:9], 0x8
+; GFX10-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-GISEL-NEXT: s_load_dwordx2 s[2:3], s[8:9], 0x10
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v1, 0x1000
+; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX10-GISEL-NEXT: global_atomic_csub v0, v1, v0, s[0:1] glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10-GISEL-NEXT: global_store_dword v1, v0, s[2:3]
+; GFX10-GISEL-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_clause 0x2
+; GFX11-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x8
+; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-GISEL-NEXT: s_load_b64 s[2:3], s[4:5], 0x10
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v1, 0x1000 :: v_dual_mov_b32 v0, s6
+; GFX11-GISEL-NEXT: global_atomic_csub_u32 v0, v1, v0, s[0:1] glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX11-GISEL-NEXT: global_store_b32 v1, v0, s[2:3]
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_clause 0x1
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX12-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
+; GFX12-GISEL-NEXT: global_atomic_sub_clamp_u32 v0, v1, v0, s[0:1] offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: global_store_b32 v1, v0, s[4:5]
+; GFX12-GISEL-NEXT: s_endpgm
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[2:3], s[8:9], 0x0
+; GFX9-SDAG-NEXT: s_load_dword s4, s[8:9], 0x8
+; GFX9-SDAG-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: s_load_dword s5, s[2:3], 0x1000
+; GFX9-SDAG-NEXT: s_add_u32 s2, s2, 0x1000
+; GFX9-SDAG-NEXT: s_addc_u32 s3, s3, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-SDAG-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-SDAG-NEXT: v_sub_u32_e64 v2, v3, s4 clamp
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[2:3] glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-SDAG-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB4_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x10
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_store_dword v1, v0, s[0:1]
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_clause 0x2
+; GFX10-SDAG-NEXT: s_load_dword s4, s[8:9], 0x8
+; GFX10-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-SDAG-NEXT: s_load_dwordx2 s[2:3], s[8:9], 0x10
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v0, 0x1000
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v1, s4
+; GFX10-SDAG-NEXT: global_atomic_csub v0, v0, v1, s[0:1] glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX10-SDAG-NEXT: global_store_dword v1, v0, s[2:3]
+; GFX10-SDAG-NEXT: s_endpgm
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_clause 0x2
+; GFX11-SDAG-NEXT: s_load_b32 s6, s[4:5], 0x8
+; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-SDAG-NEXT: s_load_b64 s[2:3], s[4:5], 0x10
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0x1000 :: v_dual_mov_b32 v1, s6
+; GFX11-SDAG-NEXT: global_atomic_csub_u32 v0, v0, v1, s[0:1] glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX11-SDAG-NEXT: global_store_b32 v1, v0, s[2:3]
+; GFX11-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-SDAG-NEXT: global_atomic_sub_clamp_u32 v1, v0, v1, s[0:1] offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[4:5]
+; GFX12-SDAG-NEXT: s_endpgm
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ store i32 %ret, ptr addrspace(1) %dst
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_usub_sat_sgpr_base_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX9-GISEL-NEXT: s_load_dword s4, s[8:9], 0x8
+; GFX9-GISEL-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, 0x1000
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: s_load_dword s5, s[0:1], 0x1000
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-GISEL-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: v_sub_u32_e64 v0, v1, s4 clamp
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-GISEL-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB5_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_clause 0x1
+; GFX10-GISEL-NEXT: s_load_dword s2, s[8:9], 0x8
+; GFX10-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v1, 0x1000
+; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-GISEL-NEXT: global_atomic_csub v0, v1, v0, s[0:1] glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_clause 0x1
+; GFX11-GISEL-NEXT: s_load_b32 s2, s[4:5], 0x8
+; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v1, 0x1000 :: v_dual_mov_b32 v0, s2
+; GFX11-GISEL-NEXT: global_atomic_csub_u32 v0, v1, v0, s[0:1] glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
+; GFX12-GISEL-NEXT: global_atomic_sub_clamp_u32 v0, v1, v0, s[0:1] offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_endpgm
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[2:3], s[8:9], 0x0
+; GFX9-SDAG-NEXT: s_load_dword s4, s[8:9], 0x8
+; GFX9-SDAG-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: s_load_dword s5, s[2:3], 0x1000
+; GFX9-SDAG-NEXT: s_add_u32 s2, s2, 0x1000
+; GFX9-SDAG-NEXT: s_addc_u32 s3, s3, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-SDAG-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: v_sub_u32_e64 v0, v1, s4 clamp
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-SDAG-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB5_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_clause 0x1
+; GFX10-SDAG-NEXT: s_load_dword s2, s[8:9], 0x8
+; GFX10-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v0, 0x1000
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v1, s2
+; GFX10-SDAG-NEXT: global_atomic_csub v0, v0, v1, s[0:1] glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: s_endpgm
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_clause 0x1
+; GFX11-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x8
+; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0x1000 :: v_dual_mov_b32 v1, s2
+; GFX11-SDAG-NEXT: global_atomic_csub_u32 v0, v0, v1, s[0:1] glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX12-SDAG-NEXT: global_atomic_sub_clamp_u32 v0, v0, v1, s[0:1] offset:4096 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_endpgm
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret void
+}
+
+define i16 @global_atomic_usub_sat_16(ptr addrspace(1) %ptr, i16 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_16:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, 0xffff0000
+; GFX9-GISEL-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v6, v3
+; GFX9-GISEL-NEXT: v_sub_u16_e64 v3, v6, v2 clamp
+; GFX9-GISEL-NEXT: v_and_or_b32 v5, v6, v4, v3
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[5:6], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v6
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB6_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_16:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX10-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX10-GISEL-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX10-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-GISEL-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX10-GISEL-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX10-GISEL-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX10-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-GISEL-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: s_cbranch_execnz .LBB6_1
+; GFX10-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_16:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX11-GISEL-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-GISEL-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_sub_nc_u16 v3.l, v4.l, v2.l clamp
+; GFX11-GISEL-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_16:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX12-GISEL-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX12-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX12-GISEL-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: s_cbranch_execnz .LBB6_1
+; GFX12-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_16:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s6, 0xffff0000
+; GFX9-SDAG-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: v_sub_u16_e64 v3, v4, v2 clamp
+; GFX9-SDAG-NEXT: v_and_or_b32 v3, v4, s6, v3
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB6_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_16:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: global_load_dword v3, v[0:1], off
+; GFX10-SDAG-NEXT: s_mov_b32 s4, 0
+; GFX10-SDAG-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX10-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-SDAG-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX10-SDAG-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX10-SDAG-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-SDAG-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: s_cbranch_execnz .LBB6_1
+; GFX10-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_16:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX11-SDAG-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX11-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_sub_nc_u16 v3.l, v4.l, v2.l clamp
+; GFX11-SDAG-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_16:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX12-SDAG-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX12-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX12-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: s_cbranch_execnz .LBB6_1
+; GFX12-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i16 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret i16 %ret
+}
+
+define i16 @global_atomic_usub_sat_offset_16(ptr addrspace(1) %ptr, i16 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_offset_16:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off offset:2048
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, 0xffff0000
+; GFX9-GISEL-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v6, v3
+; GFX9-GISEL-NEXT: v_sub_u16_e64 v3, v6, v2 clamp
+; GFX9-GISEL-NEXT: v_and_or_b32 v5, v6, v4, v3
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[5:6], off offset:2048 glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v6
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_offset_16:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: v_add_co_u32 v3, vcc_lo, 0x800, v0
+; GFX10-GISEL-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX10-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX10-GISEL-NEXT: global_load_dword v0, v[3:4], off
+; GFX10-GISEL-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX10-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX10-GISEL-NEXT: v_sub_nc_u16 v0, v1, v2 clamp
+; GFX10-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX10-GISEL-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_cmpswap v0, v[3:4], v[0:1], off glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX10-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-GISEL-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: s_cbranch_execnz .LBB7_1
+; GFX10-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_offset_16:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: global_load_b32 v3, v[0:1], off offset:2048
+; GFX11-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX11-GISEL-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-GISEL-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_sub_nc_u16 v3.l, v4.l, v2.l clamp
+; GFX11-GISEL-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2048 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_offset_16:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_load_b32 v3, v[0:1], off offset:2048
+; GFX12-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX12-GISEL-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX12-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX12-GISEL-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_offset_16:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v3, v[0:1], off offset:2048
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s6, 0xffff0000
+; GFX9-SDAG-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: v_sub_u16_e64 v3, v4, v2 clamp
+; GFX9-SDAG-NEXT: v_and_or_b32 v3, v4, s6, v3
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:2048 glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_offset_16:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_add_co_u32 v3, vcc_lo, 0x800, v0
+; GFX10-SDAG-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX10-SDAG-NEXT: s_mov_b32 s4, 0
+; GFX10-SDAG-NEXT: global_load_dword v0, v[3:4], off
+; GFX10-SDAG-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX10-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX10-SDAG-NEXT: v_sub_nc_u16 v0, v1, v2 clamp
+; GFX10-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX10-SDAG-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_cmpswap v0, v[3:4], v[0:1], off glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX10-SDAG-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-SDAG-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: s_cbranch_execnz .LBB7_1
+; GFX10-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_offset_16:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: global_load_b32 v3, v[0:1], off offset:2048
+; GFX11-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX11-SDAG-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX11-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_sub_nc_u16 v3.l, v4.l, v2.l clamp
+; GFX11-SDAG-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2048 glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_offset_16:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_load_b32 v3, v[0:1], off offset:2048
+; GFX12-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX12-SDAG-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX12-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX12-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i16, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i16 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret i16 %ret
+}
+
+define void @global_atomic_usub_sat_nortn_16(ptr addrspace(1) %ptr, i16 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_nortn_16:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v4, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v5, 0xffff0000
+; GFX9-GISEL-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_sub_u16_e64 v3, v4, v2 clamp
+; GFX9-GISEL-NEXT: v_and_or_b32 v3, v4, v5, v3
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB8_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_nortn_16:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: global_load_dword v4, v[0:1], off
+; GFX10-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX10-GISEL-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX10-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX10-GISEL-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX10-GISEL-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-GISEL-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: s_cbranch_execnz .LBB8_1
+; GFX10-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_nortn_16:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX11-GISEL-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_sub_nc_u16 v3.l, v4.l, v2.l clamp
+; GFX11-GISEL-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_nortn_16:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX12-GISEL-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX12-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-GISEL-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_nortn_16:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v4, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s6, 0xffff0000
+; GFX9-SDAG-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_sub_u16_e64 v3, v4, v2 clamp
+; GFX9-SDAG-NEXT: v_and_or_b32 v3, v4, s6, v3
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB8_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_nortn_16:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: global_load_dword v4, v[0:1], off
+; GFX10-SDAG-NEXT: s_mov_b32 s4, 0
+; GFX10-SDAG-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX10-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX10-SDAG-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-SDAG-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-SDAG-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: s_cbranch_execnz .LBB8_1
+; GFX10-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_nortn_16:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX11-SDAG-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX11-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: v_sub_nc_u16 v3.l, v4.l, v2.l clamp
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_nortn_16:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX12-SDAG-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX12-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-SDAG-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i16 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret void
+}
+
+define void @global_atomic_usub_sat_offset_nortn_16(ptr addrspace(1) %ptr, i16 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_offset_nortn_16:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v4, v[0:1], off offset:2048
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v5, 0xffff0000
+; GFX9-GISEL-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_sub_u16_e64 v3, v4, v2 clamp
+; GFX9-GISEL-NEXT: v_and_or_b32 v3, v4, v5, v3
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:2048 glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_offset_nortn_16:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, 0x800, v0
+; GFX10-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX10-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX10-GISEL-NEXT: global_load_dword v4, v[0:1], off
+; GFX10-GISEL-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX10-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX10-GISEL-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX10-GISEL-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-GISEL-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: s_cbranch_execnz .LBB9_1
+; GFX10-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_offset_nortn_16:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: global_load_b32 v4, v[0:1], off offset:2048
+; GFX11-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX11-GISEL-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_sub_nc_u16 v3.l, v4.l, v2.l clamp
+; GFX11-GISEL-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2048 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_offset_nortn_16:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_load_b32 v4, v[0:1], off offset:2048
+; GFX12-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX12-GISEL-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX12-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-GISEL-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_offset_nortn_16:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v4, v[0:1], off offset:2048
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s6, 0xffff0000
+; GFX9-SDAG-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_sub_u16_e64 v3, v4, v2 clamp
+; GFX9-SDAG-NEXT: v_and_or_b32 v3, v4, s6, v3
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:2048 glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_offset_nortn_16:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_add_co_u32 v0, vcc_lo, 0x800, v0
+; GFX10-SDAG-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX10-SDAG-NEXT: s_mov_b32 s4, 0
+; GFX10-SDAG-NEXT: global_load_dword v4, v[0:1], off
+; GFX10-SDAG-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX10-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX10-SDAG-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-SDAG-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-SDAG-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: s_cbranch_execnz .LBB9_1
+; GFX10-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_offset_nortn_16:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: global_load_b32 v4, v[0:1], off offset:2048
+; GFX11-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX11-SDAG-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: v_sub_nc_u16 v3.l, v4.l, v2.l clamp
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2048 glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_offset_nortn_16:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_load_b32 v4, v[0:1], off offset:2048
+; GFX12-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX12-SDAG-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX12-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_sub_nc_u16 v3, v4, v2 clamp
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-SDAG-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i16, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i16 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_usub_sat_sgpr_base_offset_16(ptr addrspace(1) %ptr, i16 %data, ptr addrspace(1) %dst) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_16:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX9-GISEL-NEXT: s_load_dword s4, s[8:9], 0x8
+; GFX9-GISEL-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, 0xffff0000
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: s_load_dword s5, s[0:1], 0x800
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, s5
+; GFX9-GISEL-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-GISEL-NEXT: v_sub_u16_e64 v2, v3, s4 clamp
+; GFX9-GISEL-NEXT: v_and_or_b32 v2, v3, v0, v2
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v2, v1, v[2:3], s[0:1] offset:2048 glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX9-GISEL-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x10
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_store_short v0, v2, s[0:1]
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_16:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_clause 0x1
+; GFX10-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-GISEL-NEXT: s_load_dword s2, s[8:9], 0x8
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v0, 0x800
+; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT: s_load_dword s3, s[0:1], 0x800
+; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10-GISEL-NEXT: s_mov_b32 s3, 0
+; GFX10-GISEL-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX10-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-GISEL-NEXT: v_sub_nc_u16 v1, v2, s2 clamp
+; GFX10-GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX10-GISEL-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX10-GISEL-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[0:1] glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX10-GISEL-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX10-GISEL-NEXT: s_andn2_b32 exec_lo, exec_lo, s3
+; GFX10-GISEL-NEXT: s_cbranch_execnz .LBB10_1
+; GFX10-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s3
+; GFX10-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x10
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT: global_store_short v0, v1, s[0:1]
+; GFX10-GISEL-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_16:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_clause 0x1
+; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-GISEL-NEXT: s_load_b32 s2, s[4:5], 0x8
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: s_load_b32 s3, s[0:1], 0x800
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s3
+; GFX11-GISEL-NEXT: s_mov_b32 s3, 0
+; GFX11-GISEL-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-GISEL-NEXT: v_mov_b16_e32 v1.h, 0
+; GFX11-GISEL-NEXT: v_sub_nc_u16 v1.l, v2.l, s2 clamp
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-GISEL-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:2048 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-GISEL-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s3
+; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x10
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: global_store_b16 v0, v1, s[0:1]
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_16:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_load_b32 s3, s[0:1], 0x800
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s3
+; GFX12-GISEL-NEXT: s_mov_b32 s3, 0
+; GFX12-GISEL-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-GISEL-NEXT: v_sub_nc_u16 v1, v2, s2 clamp
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-GISEL-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-GISEL-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
+; GFX12-GISEL-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s3
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x10
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b16 v0, v1, s[0:1]
+; GFX12-GISEL-NEXT: s_endpgm
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_16:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX9-SDAG-NEXT: s_load_dword s4, s[8:9], 0x8
+; GFX9-SDAG-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s5, 0xffff0000
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: s_load_dword s6, s[0:1], 0x800
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-SDAG-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-SDAG-NEXT: v_sub_u16_e64 v0, v3, s4 clamp
+; GFX9-SDAG-NEXT: v_and_or_b32 v2, v3, s5, v0
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:2048 glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-SDAG-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x10
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_store_short v1, v0, s[0:1]
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_16:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_clause 0x1
+; GFX10-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-SDAG-NEXT: s_load_dword s2, s[8:9], 0x8
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: s_load_dword s3, s[0:1], 0x800
+; GFX10-SDAG-NEXT: s_add_u32 s0, s0, 0x800
+; GFX10-SDAG-NEXT: s_addc_u32 s1, s1, 0
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX10-SDAG-NEXT: s_mov_b32 s3, 0
+; GFX10-SDAG-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX10-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-SDAG-NEXT: v_sub_nc_u16 v1, v2, s2 clamp
+; GFX10-SDAG-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX10-SDAG-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX10-SDAG-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[0:1] glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX10-SDAG-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX10-SDAG-NEXT: s_andn2_b32 exec_lo, exec_lo, s3
+; GFX10-SDAG-NEXT: s_cbranch_execnz .LBB10_1
+; GFX10-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s3
+; GFX10-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x10
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: global_store_short v0, v1, s[0:1]
+; GFX10-SDAG-NEXT: s_endpgm
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_16:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_clause 0x1
+; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x8
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_load_b32 s3, s[0:1], 0x800
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX11-SDAG-NEXT: s_mov_b32 s3, 0
+; GFX11-SDAG-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v1.h, 0
+; GFX11-SDAG-NEXT: v_sub_nc_u16 v1.l, v2.l, s2 clamp
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-SDAG-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:2048 glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-SDAG-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
+; GFX11-SDAG-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s3
+; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x10
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: global_store_b16 v0, v1, s[0:1]
+; GFX11-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_16:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_load_b32 s3, s[0:1], 0x800
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX12-SDAG-NEXT: s_mov_b32 s3, 0
+; GFX12-SDAG-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-SDAG-NEXT: v_sub_nc_u16 v1, v2, s2 clamp
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-SDAG-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-SDAG-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
+; GFX12-SDAG-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s3
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x10
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b16 v0, v1, s[0:1]
+; GFX12-SDAG-NEXT: s_endpgm
+ %gep = getelementptr i16, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i16 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ store i16 %ret, ptr addrspace(1) %dst
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_usub_sat_sgpr_base_offset_nortn_16(ptr addrspace(1) %ptr, i16 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_16:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX9-GISEL-NEXT: s_load_dword s4, s[8:9], 0x8
+; GFX9-GISEL-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, 0xffff0000
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: s_load_dword s5, s[0:1], 0x800
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-GISEL-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: v_sub_u16_e64 v0, v1, s4 clamp
+; GFX9-GISEL-NEXT: v_and_or_b32 v0, v1, v2, v0
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v0, v3, v[0:1], s[0:1] offset:2048 glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-GISEL-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_16:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_clause 0x1
+; GFX10-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-GISEL-NEXT: s_load_dword s2, s[8:9], 0x8
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v2, 0x800
+; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT: s_load_dword s3, s[0:1], 0x800
+; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10-GISEL-NEXT: s_mov_b32 s3, 0
+; GFX10-GISEL-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX10-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-GISEL-NEXT: v_sub_nc_u16 v0, v1, s2 clamp
+; GFX10-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX10-GISEL-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
+; GFX10-GISEL-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX10-GISEL-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX10-GISEL-NEXT: s_andn2_b32 exec_lo, exec_lo, s3
+; GFX10-GISEL-NEXT: s_cbranch_execnz .LBB11_1
+; GFX10-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-GISEL-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_16:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_clause 0x1
+; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-GISEL-NEXT: s_load_b32 s2, s[4:5], 0x8
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: s_load_b32 s3, s[0:1], 0x800
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX11-GISEL-NEXT: s_mov_b32 s3, 0
+; GFX11-GISEL-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_sub_nc_u16 v0.l, v1.l, s2 clamp
+; GFX11-GISEL-NEXT: v_mov_b16_e32 v0.h, 0
+; GFX11-GISEL-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
+; GFX11-GISEL-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:2048 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX11-GISEL-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_16:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_load_b32 s3, s[0:1], 0x800
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX12-GISEL-NEXT: s_mov_b32 s3, 0
+; GFX12-GISEL-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_sub_nc_u16 v0, v1, s2 clamp
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
+; GFX12-GISEL-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
+; GFX12-GISEL-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-GISEL-NEXT: s_endpgm
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_16:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX9-SDAG-NEXT: s_load_dword s4, s[8:9], 0x8
+; GFX9-SDAG-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s5, 0xffff0000
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: s_load_dword s6, s[0:1], 0x800
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-SDAG-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: v_sub_u16_e64 v0, v1, s4 clamp
+; GFX9-SDAG-NEXT: v_and_or_b32 v0, v1, s5, v0
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:2048 glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-SDAG-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_16:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_clause 0x1
+; GFX10-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-SDAG-NEXT: s_load_dword s2, s[8:9], 0x8
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: s_load_dword s3, s[0:1], 0x800
+; GFX10-SDAG-NEXT: s_add_u32 s0, s0, 0x800
+; GFX10-SDAG-NEXT: s_addc_u32 s1, s1, 0
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX10-SDAG-NEXT: s_mov_b32 s3, 0
+; GFX10-SDAG-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX10-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-SDAG-NEXT: v_sub_nc_u16 v0, v1, s2 clamp
+; GFX10-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX10-SDAG-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
+; GFX10-SDAG-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX10-SDAG-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX10-SDAG-NEXT: s_andn2_b32 exec_lo, exec_lo, s3
+; GFX10-SDAG-NEXT: s_cbranch_execnz .LBB11_1
+; GFX10-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-SDAG-NEXT: s_endpgm
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_16:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_clause 0x1
+; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x8
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_load_b32 s3, s[0:1], 0x800
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX11-SDAG-NEXT: s_mov_b32 s3, 0
+; GFX11-SDAG-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_sub_nc_u16 v0.l, v1.l, s2 clamp
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v0.h, 0
+; GFX11-SDAG-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
+; GFX11-SDAG-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:2048 glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX11-SDAG-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
+; GFX11-SDAG-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_16:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_load_b32 s3, s[0:1], 0x800
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX12-SDAG-NEXT: s_mov_b32 s3, 0
+; GFX12-SDAG-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_sub_nc_u16 v0, v1, s2 clamp
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
+; GFX12-SDAG-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
+; GFX12-SDAG-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-SDAG-NEXT: s_endpgm
+ %gep = getelementptr i16, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i16 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret void
+}
+
+define i8 @global_atomic_usub_sat_8(ptr addrspace(1) %ptr, i8 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_8:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: v_lshlrev_b16_e32 v2, 8, v2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v5, 0xffffff00
+; GFX9-GISEL-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-GISEL-NEXT: v_lshlrev_b16_e32 v3, 8, v7
+; GFX9-GISEL-NEXT: v_sub_u16_e64 v3, v3, v2 clamp
+; GFX9-GISEL-NEXT: v_and_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-GISEL-NEXT: v_and_or_b32 v6, v7, v5, v3
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[6:7], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v7
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_8:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX10-GISEL-NEXT: v_lshlrev_b16 v2, 8, v2
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX10-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX10-GISEL-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX10-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v6, v3
+; GFX10-GISEL-NEXT: v_lshlrev_b16 v3, 8, v6
+; GFX10-GISEL-NEXT: v_sub_nc_u16 v3, v3, v2 clamp
+; GFX10-GISEL-NEXT: v_and_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-GISEL-NEXT: v_and_or_b32 v5, 0xffffff00, v6, v3
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[5:6], off glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX10-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-GISEL-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: s_cbranch_execnz .LBB12_1
+; GFX10-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_8:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-GISEL-NEXT: v_lshlrev_b16 v2.l, 8, v2.l
+; GFX11-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX11-GISEL-NEXT: .p2align 6
+; GFX11-GISEL-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_lshlrev_b16 v2.h, 8, v4.l
+; GFX11-GISEL-NEXT: v_sub_nc_u16 v2.h, v2.h, v2.l clamp
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_lshrrev_b16 v3.l, 8, v2.h
+; GFX11-GISEL-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_8:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-GISEL-NEXT: v_lshlrev_b16 v2, 8, v2
+; GFX12-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX12-GISEL-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX12-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_lshlrev_b16 v3, 8, v4
+; GFX12-GISEL-NEXT: v_sub_nc_u16 v3, v3, v2 clamp
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_lshrrev_b16 v3, 8, v3
+; GFX12-GISEL-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_8:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: s_movk_i32 s6, 0xff00
+; GFX9-SDAG-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: v_sub_u16_sdwa v3, v4, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_and_or_b32 v3, v4, s6, v3
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_8:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: global_load_dword v3, v[0:1], off
+; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-SDAG-NEXT: s_mov_b32 s4, 0
+; GFX10-SDAG-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX10-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX10-SDAG-NEXT: v_sub_nc_u16 v3, v3, v2 clamp
+; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX10-SDAG-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX10-SDAG-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-SDAG-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: s_cbranch_execnz .LBB12_1
+; GFX10-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_8:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-SDAG-NEXT: v_and_b16 v2.l, 0xff, v2.l
+; GFX11-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX11-SDAG-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX11-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_and_b16 v2.h, 0xff, v4.l
+; GFX11-SDAG-NEXT: v_sub_nc_u16 v3.l, v2.h, v2.l clamp
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_8:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX12-SDAG-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX12-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX12-SDAG-NEXT: v_sub_nc_u16 v3, v3, v2 clamp
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-SDAG-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i8 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret i8 %ret
+}
+
+define i8 @global_atomic_usub_sat_offset_8(ptr addrspace(1) %ptr, i8 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_offset_8:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off offset:1024
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: v_lshlrev_b16_e32 v2, 8, v2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v5, 0xffffff00
+; GFX9-GISEL-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-GISEL-NEXT: v_lshlrev_b16_e32 v3, 8, v7
+; GFX9-GISEL-NEXT: v_sub_u16_e64 v3, v3, v2 clamp
+; GFX9-GISEL-NEXT: v_and_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-GISEL-NEXT: v_and_or_b32 v6, v7, v5, v3
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[6:7], off offset:1024 glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v7
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB13_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_offset_8:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: global_load_dword v3, v[0:1], off offset:1024
+; GFX10-GISEL-NEXT: v_lshlrev_b16 v2, 8, v2
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX10-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX10-GISEL-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX10-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v6, v3
+; GFX10-GISEL-NEXT: v_lshlrev_b16 v3, 8, v6
+; GFX10-GISEL-NEXT: v_sub_nc_u16 v3, v3, v2 clamp
+; GFX10-GISEL-NEXT: v_and_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-GISEL-NEXT: v_and_or_b32 v5, 0xffffff00, v6, v3
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[5:6], off offset:1024 glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX10-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-GISEL-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: s_cbranch_execnz .LBB13_1
+; GFX10-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_offset_8:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: global_load_b32 v3, v[0:1], off offset:1024
+; GFX11-GISEL-NEXT: v_lshlrev_b16 v2.l, 8, v2.l
+; GFX11-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX11-GISEL-NEXT: .p2align 6
+; GFX11-GISEL-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_lshlrev_b16 v2.h, 8, v4.l
+; GFX11-GISEL-NEXT: v_sub_nc_u16 v2.h, v2.h, v2.l clamp
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_lshrrev_b16 v3.l, 8, v2.h
+; GFX11-GISEL-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:1024 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_offset_8:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_load_b32 v3, v[0:1], off offset:1024
+; GFX12-GISEL-NEXT: v_lshlrev_b16 v2, 8, v2
+; GFX12-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX12-GISEL-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_lshlrev_b16 v3, 8, v4
+; GFX12-GISEL-NEXT: v_sub_nc_u16 v3, v3, v2 clamp
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_lshrrev_b16 v3, 8, v3
+; GFX12-GISEL-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:1024 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_offset_8:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v3, v[0:1], off offset:1024
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: s_movk_i32 s6, 0xff00
+; GFX9-SDAG-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: v_sub_u16_sdwa v3, v4, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_and_or_b32 v3, v4, s6, v3
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:1024 glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB13_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_offset_8:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: global_load_dword v3, v[0:1], off offset:1024
+; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-SDAG-NEXT: s_mov_b32 s4, 0
+; GFX10-SDAG-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX10-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX10-SDAG-NEXT: v_sub_nc_u16 v3, v3, v2 clamp
+; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX10-SDAG-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:1024 glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX10-SDAG-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-SDAG-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: s_cbranch_execnz .LBB13_1
+; GFX10-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_offset_8:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: global_load_b32 v3, v[0:1], off offset:1024
+; GFX11-SDAG-NEXT: v_and_b16 v2.l, 0xff, v2.l
+; GFX11-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX11-SDAG-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_and_b16 v2.h, 0xff, v4.l
+; GFX11-SDAG-NEXT: v_sub_nc_u16 v3.l, v2.h, v2.l clamp
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:1024 glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_offset_8:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_load_b32 v3, v[0:1], off offset:1024
+; GFX12-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX12-SDAG-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX12-SDAG-NEXT: v_sub_nc_u16 v3, v3, v2 clamp
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-SDAG-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:1024 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i8, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i8 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret i8 %ret
+}
+
+define void @global_atomic_usub_sat_nortn_8(ptr addrspace(1) %ptr, i8 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_nortn_8:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: v_lshlrev_b16_e32 v4, 8, v2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v5, 0xff
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v6, 0xffffff00
+; GFX9-GISEL-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_lshlrev_b16_e32 v2, 8, v3
+; GFX9-GISEL-NEXT: v_sub_u16_e64 v2, v2, v4 clamp
+; GFX9-GISEL-NEXT: v_and_b32_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-GISEL-NEXT: v_and_or_b32 v2, v3, v6, v2
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_nortn_8:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX10-GISEL-NEXT: v_lshlrev_b16 v4, 8, v2
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v5, 0xff
+; GFX10-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX10-GISEL-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX10-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: v_lshlrev_b16 v2, 8, v3
+; GFX10-GISEL-NEXT: v_sub_nc_u16 v2, v2, v4 clamp
+; GFX10-GISEL-NEXT: v_and_b32_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-GISEL-NEXT: v_and_or_b32 v2, 0xffffff00, v3, v2
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-GISEL-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: s_cbranch_execnz .LBB14_1
+; GFX10-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_nortn_8:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-GISEL-NEXT: v_lshlrev_b16 v2.l, 8, v2.l
+; GFX11-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX11-GISEL-NEXT: .p2align 6
+; GFX11-GISEL-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_lshlrev_b16 v2.h, 8, v4.l
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_sub_nc_u16 v2.h, v2.h, v2.l clamp
+; GFX11-GISEL-NEXT: v_lshrrev_b16 v3.l, 8, v2.h
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-GISEL-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_nortn_8:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-GISEL-NEXT: v_lshlrev_b16 v4, 8, v2
+; GFX12-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX12-GISEL-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_lshlrev_b16 v2, 8, v3
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_sub_nc_u16 v2, v2, v4 clamp
+; GFX12-GISEL-NEXT: v_lshrrev_b16 v2, 8, v2
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-GISEL-NEXT: v_and_or_b32 v2, 0xffffff00, v3, v2
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_nortn_8:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v4, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: s_movk_i32 s6, 0xff00
+; GFX9-SDAG-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_sub_u16_sdwa v3, v4, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_and_or_b32 v3, v4, s6, v3
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_nortn_8:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: global_load_dword v3, v[0:1], off
+; GFX10-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX10-SDAG-NEXT: s_mov_b32 s4, 0
+; GFX10-SDAG-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX10-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v3
+; GFX10-SDAG-NEXT: v_sub_nc_u16 v2, v2, v4 clamp
+; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX10-SDAG-NEXT: v_and_or_b32 v2, 0xffffff00, v3, v2
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-SDAG-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-SDAG-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: s_cbranch_execnz .LBB14_1
+; GFX10-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_nortn_8:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-SDAG-NEXT: v_and_b16 v2.l, 0xff, v2.l
+; GFX11-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX11-SDAG-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: v_and_b16 v2.h, 0xff, v4.l
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_sub_nc_u16 v3.l, v2.h, v2.l clamp
+; GFX11-SDAG-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_nortn_8:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX12-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX12-SDAG-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v3
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_sub_nc_u16 v2, v2, v4 clamp
+; GFX12-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_or_b32 v2, 0xffffff00, v3, v2
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i8 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret void
+}
+
+define void @global_atomic_usub_sat_offset_nortn_8(ptr addrspace(1) %ptr, i8 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_offset_nortn_8:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off offset:1024
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: v_lshlrev_b16_e32 v4, 8, v2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v5, 0xff
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v6, 0xffffff00
+; GFX9-GISEL-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_lshlrev_b16_e32 v2, 8, v3
+; GFX9-GISEL-NEXT: v_sub_u16_e64 v2, v2, v4 clamp
+; GFX9-GISEL-NEXT: v_and_b32_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-GISEL-NEXT: v_and_or_b32 v2, v3, v6, v2
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:1024 glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_offset_nortn_8:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: global_load_dword v3, v[0:1], off offset:1024
+; GFX10-GISEL-NEXT: v_lshlrev_b16 v4, 8, v2
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v5, 0xff
+; GFX10-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX10-GISEL-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX10-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: v_lshlrev_b16 v2, 8, v3
+; GFX10-GISEL-NEXT: v_sub_nc_u16 v2, v2, v4 clamp
+; GFX10-GISEL-NEXT: v_and_b32_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-GISEL-NEXT: v_and_or_b32 v2, 0xffffff00, v3, v2
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:1024 glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-GISEL-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: s_cbranch_execnz .LBB15_1
+; GFX10-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_offset_nortn_8:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: global_load_b32 v4, v[0:1], off offset:1024
+; GFX11-GISEL-NEXT: v_lshlrev_b16 v2.l, 8, v2.l
+; GFX11-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX11-GISEL-NEXT: .p2align 6
+; GFX11-GISEL-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_lshlrev_b16 v2.h, 8, v4.l
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_sub_nc_u16 v2.h, v2.h, v2.l clamp
+; GFX11-GISEL-NEXT: v_lshrrev_b16 v3.l, 8, v2.h
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-GISEL-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:1024 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_offset_nortn_8:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_load_b32 v3, v[0:1], off offset:1024
+; GFX12-GISEL-NEXT: v_lshlrev_b16 v4, 8, v2
+; GFX12-GISEL-NEXT: s_mov_b32 s0, 0
+; GFX12-GISEL-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX12-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_lshlrev_b16 v2, 8, v3
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_sub_nc_u16 v2, v2, v4 clamp
+; GFX12-GISEL-NEXT: v_lshrrev_b16 v2, 8, v2
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-GISEL-NEXT: v_and_or_b32 v2, 0xffffff00, v3, v2
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:1024 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_offset_nortn_8:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v4, v[0:1], off offset:1024
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: s_movk_i32 s6, 0xff00
+; GFX9-SDAG-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_sub_u16_sdwa v3, v4, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_and_or_b32 v3, v4, s6, v3
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:1024 glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_offset_nortn_8:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: global_load_dword v3, v[0:1], off offset:1024
+; GFX10-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX10-SDAG-NEXT: s_mov_b32 s4, 0
+; GFX10-SDAG-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX10-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v3
+; GFX10-SDAG-NEXT: v_sub_nc_u16 v2, v2, v4 clamp
+; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX10-SDAG-NEXT: v_and_or_b32 v2, 0xffffff00, v3, v2
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:1024 glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-SDAG-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-SDAG-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: s_cbranch_execnz .LBB15_1
+; GFX10-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_offset_nortn_8:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: global_load_b32 v4, v[0:1], off offset:1024
+; GFX11-SDAG-NEXT: v_and_b16 v2.l, 0xff, v2.l
+; GFX11-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX11-SDAG-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX11-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: v_and_b16 v2.h, 0xff, v4.l
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_sub_nc_u16 v3.l, v2.h, v2.l clamp
+; GFX11-SDAG-NEXT: v_and_or_b32 v3, 0xffffff00, v4, v3
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:1024 glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_offset_nortn_8:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_load_b32 v3, v[0:1], off offset:1024
+; GFX12-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX12-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX12-SDAG-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX12-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v3
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_sub_nc_u16 v2, v2, v4 clamp
+; GFX12-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_or_b32 v2, 0xffffff00, v3, v2
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:1024 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i8, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i8 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_usub_sat_sgpr_base_offset_8(ptr addrspace(1) %ptr, i8 %data, ptr addrspace(1) %dst) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_8:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX9-GISEL-NEXT: s_load_dword s4, s[8:9], 0x8
+; GFX9-GISEL-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, 0xff
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0xffffff00
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: s_load_dword s5, s[0:1], 0x400
+; GFX9-GISEL-NEXT: s_lshl_b32 s4, s4, 8
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-GISEL-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: v_lshlrev_b16_e32 v3, 8, v4
+; GFX9-GISEL-NEXT: v_sub_u16_e64 v3, v3, s4 clamp
+; GFX9-GISEL-NEXT: v_and_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-GISEL-NEXT: v_and_or_b32 v3, v4, v1, v3
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v2, v[3:4], s[0:1] offset:1024 glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB16_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x10
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_store_byte v0, v3, s[0:1]
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_8:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_clause 0x1
+; GFX10-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-GISEL-NEXT: s_load_dword s3, s[8:9], 0x8
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v1, 0xff
+; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT: s_load_dword s2, s[0:1], 0x400
+; GFX10-GISEL-NEXT: s_lshl_b32 s3, s3, 8
+; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX10-GISEL-NEXT: s_mov_b32 s2, 0
+; GFX10-GISEL-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX10-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-GISEL-NEXT: v_lshlrev_b16 v2, 8, v3
+; GFX10-GISEL-NEXT: v_sub_nc_u16 v2, v2, s3 clamp
+; GFX10-GISEL-NEXT: v_and_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-GISEL-NEXT: v_and_or_b32 v2, 0xffffff00, v3, v2
+; GFX10-GISEL-NEXT: global_atomic_cmpswap v2, v0, v[2:3], s[0:1] offset:1024 glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX10-GISEL-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX10-GISEL-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX10-GISEL-NEXT: s_cbranch_execnz .LBB16_1
+; GFX10-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX10-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x10
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT: global_store_byte v0, v2, s[0:1]
+; GFX10-GISEL-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_8:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_clause 0x1
+; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-GISEL-NEXT: s_load_b32 s3, s[4:5], 0x8
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: s_load_b32 s2, s[0:1], 0x400
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-GISEL-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s2
+; GFX11-GISEL-NEXT: s_mov_b32 s2, 0
+; GFX11-GISEL-NEXT: .p2align 6
+; GFX11-GISEL-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-GISEL-NEXT: v_lshlrev_b16 v1.l, 8, v2.l
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_sub_nc_u16 v1.l, v1.l, s3 clamp
+; GFX11-GISEL-NEXT: v_lshrrev_b16 v1.l, 8, v1.l
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-GISEL-NEXT: v_and_or_b32 v1, 0xffffff00, v2, v1
+; GFX11-GISEL-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:1024 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-GISEL-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB16_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x10
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: global_store_b8 v0, v1, s[0:1]
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_8:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_load_b32 s3, s[0:1], 0x400
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-GISEL-NEXT: s_lshl_b32 s2, s2, 8
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX12-GISEL-NEXT: s_mov_b32 s3, 0
+; GFX12-GISEL-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX12-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-GISEL-NEXT: v_lshlrev_b16 v1, 8, v2
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_sub_nc_u16 v1, v1, s2 clamp
+; GFX12-GISEL-NEXT: v_lshrrev_b16 v1, 8, v1
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-GISEL-NEXT: v_and_or_b32 v1, 0xffffff00, v2, v1
+; GFX12-GISEL-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:1024 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
+; GFX12-GISEL-NEXT: s_cbranch_execnz .LBB16_1
+; GFX12-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s3
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x10
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b8 v0, v1, s[0:1]
+; GFX12-GISEL-NEXT: s_endpgm
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_8:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX9-SDAG-NEXT: s_load_dword s5, s[8:9], 0x8
+; GFX9-SDAG-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-SDAG-NEXT: s_movk_i32 s4, 0xff00
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: s_load_dword s6, s[0:1], 0x400
+; GFX9-SDAG-NEXT: s_and_b32 s5, s5, 0xff
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-SDAG-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-SDAG-NEXT: v_sub_u16_sdwa v0, v3, s5 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-SDAG-NEXT: v_and_or_b32 v2, v3, s4, v0
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:1024 glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-SDAG-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB16_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x10
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_store_byte v1, v0, s[0:1]
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_8:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_clause 0x1
+; GFX10-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-SDAG-NEXT: s_load_dword s3, s[8:9], 0x8
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: s_load_dword s2, s[0:1], 0x400
+; GFX10-SDAG-NEXT: s_and_b32 s3, s3, 0xff
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v1, s2
+; GFX10-SDAG-NEXT: s_mov_b32 s2, 0
+; GFX10-SDAG-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX10-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX10-SDAG-NEXT: v_sub_nc_u16 v1, v1, s3 clamp
+; GFX10-SDAG-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX10-SDAG-NEXT: v_and_or_b32 v1, 0xffffff00, v2, v1
+; GFX10-SDAG-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[0:1] offset:1024 glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX10-SDAG-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX10-SDAG-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX10-SDAG-NEXT: s_cbranch_execnz .LBB16_1
+; GFX10-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX10-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x10
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: global_store_byte v0, v1, s[0:1]
+; GFX10-SDAG-NEXT: s_endpgm
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_8:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_clause 0x1
+; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-SDAG-NEXT: s_load_b32 s3, s[4:5], 0x8
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_load_b32 s2, s[0:1], 0x400
+; GFX11-SDAG-NEXT: s_and_b32 s3, s3, 0xff
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, s2
+; GFX11-SDAG-NEXT: s_mov_b32 s2, 0
+; GFX11-SDAG-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX11-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v1.h, 0
+; GFX11-SDAG-NEXT: v_and_b16 v1.l, 0xff, v2.l
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_sub_nc_u16 v1.l, v1.l, s3 clamp
+; GFX11-SDAG-NEXT: v_and_or_b32 v1, 0xffffff00, v2, v1
+; GFX11-SDAG-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:1024 glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-SDAG-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX11-SDAG-NEXT: s_cbranch_execnz .LBB16_1
+; GFX11-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x10
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: global_store_b8 v0, v1, s[0:1]
+; GFX11-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_8:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_load_b32 s3, s[0:1], 0x400
+; GFX12-SDAG-NEXT: s_and_b32 s2, s2, 0xff
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX12-SDAG-NEXT: s_mov_b32 s3, 0
+; GFX12-SDAG-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX12-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_sub_nc_u16 v1, v1, s2 clamp
+; GFX12-SDAG-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_or_b32 v1, 0xffffff00, v2, v1
+; GFX12-SDAG-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:1024 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
+; GFX12-SDAG-NEXT: s_cbranch_execnz .LBB16_1
+; GFX12-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s3
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x10
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b8 v0, v1, s[0:1]
+; GFX12-SDAG-NEXT: s_endpgm
+ %gep = getelementptr i8, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i8 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ store i8 %ret, ptr addrspace(1) %dst
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_usub_sat_sgpr_base_offset_nortn_8(ptr addrspace(1) %ptr, i8 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_8:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX9-GISEL-NEXT: s_load_dword s4, s[8:9], 0x8
+; GFX9-GISEL-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, 0xff
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v3, 0xffffff00
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: s_load_dword s5, s[0:1], 0x400
+; GFX9-GISEL-NEXT: s_lshl_b32 s4, s4, 8
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-GISEL-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: v_lshlrev_b16_e32 v0, 8, v1
+; GFX9-GISEL-NEXT: v_sub_u16_e64 v0, v0, s4 clamp
+; GFX9-GISEL-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-GISEL-NEXT: v_and_or_b32 v0, v1, v3, v0
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v0, v4, v[0:1], s[0:1] offset:1024 glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-GISEL-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_8:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_clause 0x1
+; GFX10-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-GISEL-NEXT: s_load_dword s3, s[8:9], 0x8
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v3, 0xff
+; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT: s_load_dword s2, s[0:1], 0x400
+; GFX10-GISEL-NEXT: s_lshl_b32 s3, s3, 8
+; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v1, s2
+; GFX10-GISEL-NEXT: s_mov_b32 s2, 0
+; GFX10-GISEL-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX10-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-GISEL-NEXT: v_lshlrev_b16 v0, 8, v1
+; GFX10-GISEL-NEXT: v_sub_nc_u16 v0, v0, s3 clamp
+; GFX10-GISEL-NEXT: v_and_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-GISEL-NEXT: v_and_or_b32 v0, 0xffffff00, v1, v0
+; GFX10-GISEL-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:1024 glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX10-GISEL-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX10-GISEL-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX10-GISEL-NEXT: s_cbranch_execnz .LBB17_1
+; GFX10-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-GISEL-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_8:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_clause 0x1
+; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-GISEL-NEXT: s_load_b32 s3, s[4:5], 0x8
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: s_load_b32 s2, s[0:1], 0x400
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-GISEL-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s2
+; GFX11-GISEL-NEXT: s_mov_b32 s2, 0
+; GFX11-GISEL-NEXT: .p2align 6
+; GFX11-GISEL-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_lshlrev_b16 v0.l, 8, v1.l
+; GFX11-GISEL-NEXT: v_sub_nc_u16 v0.l, v0.l, s3 clamp
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_lshrrev_b16 v0.l, 8, v0.l
+; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_and_or_b32 v0, 0xffffff00, v1, v0
+; GFX11-GISEL-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:1024 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX11-GISEL-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB17_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_8:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_load_b32 s3, s[0:1], 0x400
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-GISEL-NEXT: s_lshl_b32 s2, s2, 8
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX12-GISEL-NEXT: s_mov_b32 s3, 0
+; GFX12-GISEL-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX12-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_lshlrev_b16 v0, 8, v1
+; GFX12-GISEL-NEXT: v_sub_nc_u16 v0, v0, s2 clamp
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_lshrrev_b16 v0, 8, v0
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_and_or_b32 v0, 0xffffff00, v1, v0
+; GFX12-GISEL-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:1024 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, v0
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX12-GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
+; GFX12-GISEL-NEXT: s_cbranch_execnz .LBB17_1
+; GFX12-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-GISEL-NEXT: s_endpgm
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_8:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX9-SDAG-NEXT: s_load_dword s5, s[8:9], 0x8
+; GFX9-SDAG-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-SDAG-NEXT: s_movk_i32 s4, 0xff00
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: s_load_dword s6, s[0:1], 0x400
+; GFX9-SDAG-NEXT: s_and_b32 s5, s5, 0xff
+; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-SDAG-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: v_sub_u16_sdwa v0, v1, s5 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-SDAG-NEXT: v_and_or_b32 v0, v1, s4, v0
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:1024 glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-SDAG-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_endpgm
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_8:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_clause 0x1
+; GFX10-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-SDAG-NEXT: s_load_dword s3, s[8:9], 0x8
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: s_load_dword s2, s[0:1], 0x400
+; GFX10-SDAG-NEXT: s_and_b32 s3, s3, 0xff
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v1, s2
+; GFX10-SDAG-NEXT: s_mov_b32 s2, 0
+; GFX10-SDAG-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX10-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v1
+; GFX10-SDAG-NEXT: v_sub_nc_u16 v0, v0, s3 clamp
+; GFX10-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX10-SDAG-NEXT: v_and_or_b32 v0, 0xffffff00, v1, v0
+; GFX10-SDAG-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:1024 glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX10-SDAG-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX10-SDAG-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX10-SDAG-NEXT: s_cbranch_execnz .LBB17_1
+; GFX10-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-SDAG-NEXT: s_endpgm
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_8:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_clause 0x1
+; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-SDAG-NEXT: s_load_b32 s3, s[4:5], 0x8
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_load_b32 s2, s[0:1], 0x400
+; GFX11-SDAG-NEXT: s_and_b32 s3, s3, 0xff
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, s2
+; GFX11-SDAG-NEXT: s_mov_b32 s2, 0
+; GFX11-SDAG-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX11-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_and_b16 v0.l, 0xff, v1.l
+; GFX11-SDAG-NEXT: v_mov_b16_e32 v0.h, 0
+; GFX11-SDAG-NEXT: v_sub_nc_u16 v0.l, v0.l, s3 clamp
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_and_or_b32 v0, 0xffffff00, v1, v0
+; GFX11-SDAG-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:1024 glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX11-SDAG-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX11-SDAG-NEXT: s_cbranch_execnz .LBB17_1
+; GFX11-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-SDAG-NEXT: s_endpgm
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn_8:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_load_b32 s3, s[0:1], 0x400
+; GFX12-SDAG-NEXT: s_and_b32 s2, s2, 0xff
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX12-SDAG-NEXT: s_mov_b32 s3, 0
+; GFX12-SDAG-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX12-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v1
+; GFX12-SDAG-NEXT: v_sub_nc_u16 v0, v0, s2 clamp
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX12-SDAG-NEXT: v_and_or_b32 v0, 0xffffff00, v1, v0
+; GFX12-SDAG-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:1024 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, v0
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_or_b32 s3, vcc_lo, s3
+; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12-SDAG-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
+; GFX12-SDAG-NEXT: s_cbranch_execnz .LBB17_1
+; GFX12-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-SDAG-NEXT: s_endpgm
+ %gep = getelementptr i8, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i8 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret void
+}
+
+define i32 @global_atomic_usub_sat__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat__amdgpu_no_remote_memory:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: v_sub_u32_e64 v3, v4, v2 clamp
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB18_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat__amdgpu_no_remote_memory:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat__amdgpu_no_remote_memory:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat__amdgpu_no_remote_memory:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat__amdgpu_no_remote_memory:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: v_sub_u32_e64 v3, v4, v2 clamp
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB18_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat__amdgpu_no_remote_memory:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat__amdgpu_no_remote_memory:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat__amdgpu_no_remote_memory:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
+ ret i32 %ret
+}
+
+define i32 @global_atomic_usub_sat__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: v_sub_u32_e64 v3, v4, v2 clamp
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB19_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: v_sub_u32_e64 v3, v4, v2 clamp
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB19_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
+ ret i32 %ret
+}
+
+define i32 @global_atomic_usub_sat__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %data) {
+; GFX9-GISEL-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-GISEL-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX9-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-GISEL-NEXT: v_sub_u32_e64 v3, v4, v2 clamp
+; GFX9-GISEL-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: buffer_wbinvl1_vol
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: s_cbranch_execnz .LBB20_1
+; GFX9-GISEL-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-GISEL-NEXT: buffer_gl1_inv
+; GFX10-GISEL-NEXT: buffer_gl0_inv
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-GISEL-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-GISEL-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: buffer_gl1_inv
+; GFX11-GISEL-NEXT: buffer_gl0_inv
+; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-GISEL-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: global_load_dword v3, v[0:1], off
+; GFX9-SDAG-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-SDAG-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX9-SDAG-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX9-SDAG-NEXT: v_sub_u32_e64 v3, v4, v2 clamp
+; GFX9-SDAG-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX9-SDAG-NEXT: buffer_wbinvl1_vol
+; GFX9-SDAG-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
+; GFX9-SDAG-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-SDAG-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB20_1
+; GFX9-SDAG-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-SDAG-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX10-SDAG-NEXT: buffer_gl1_inv
+; GFX10-SDAG-NEXT: buffer_gl0_inv
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SDAG-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: buffer_gl1_inv
+; GFX11-SDAG-NEXT: buffer_gl0_inv
+; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-SDAG-LABEL: global_atomic_usub_sat__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i32 %ret
+}
+
+attributes #0 = { nounwind willreturn }
+attributes #1 = { argmemonly nounwind }
+
+!0 = !{}
diff --git a/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll b/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll
index 3266fde10f9fb..4af2d58b01518 100644
--- a/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll
@@ -94,16 +94,22 @@ define amdgpu_kernel void @global_atomic_cond_sub_no_rtn_u32(ptr addrspace(1) %a
; GFX12-SDAG: ; %bb.0: ; %entry
; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
-; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v0, v0, v1, s[0:1] offset:-16 th:TH_ATOMIC_RETURN
+; GFX12-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], -16
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v2, s2
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-SDAG-NEXT: flat_atomic_cond_sub_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN
; GFX12-SDAG-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: global_atomic_cond_sub_no_rtn_u32:
; GFX12-GISEL: ; %bb.0: ; %entry
; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v0, v1, v0, s[0:1] offset:-16 th:TH_ATOMIC_RETURN
+; GFX12-GISEL-NEXT: s_add_co_u32 s0, s0, -16
+; GFX12-GISEL-NEXT: s_add_co_ci_u32 s1, s1, -1
+; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX12-GISEL-NEXT: flat_atomic_cond_sub_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %addr, i32 -4
@@ -116,16 +122,22 @@ define amdgpu_kernel void @global_atomic_cond_sub_no_rtn_u32_forced(ptr addrspac
; GFX12-SDAG: ; %bb.0: ; %entry
; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
-; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v0, v1, s[0:1] offset:-16
+; GFX12-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], -16
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v2, s2
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-SDAG-NEXT: flat_atomic_cond_sub_u32 v[0:1], v2
; GFX12-SDAG-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: global_atomic_cond_sub_no_rtn_u32_forced:
; GFX12-GISEL: ; %bb.0: ; %entry
; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v1, v0, s[0:1] offset:-16
+; GFX12-GISEL-NEXT: s_add_co_u32 s0, s0, -16
+; GFX12-GISEL-NEXT: s_add_co_ci_u32 s1, s1, -1
+; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX12-GISEL-NEXT: flat_atomic_cond_sub_u32 v[0:1], v2
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %addr, i32 -4
@@ -136,14 +148,17 @@ entry:
define amdgpu_kernel void @global_atomic_cond_sub_rtn_u32(ptr addrspace(1) %addr, i32 %in, ptr addrspace(1) %use) {
; GFX12-SDAG-LABEL: global_atomic_cond_sub_rtn_u32:
; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_clause 0x1
; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
-; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 0
; GFX12-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s2
-; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v1, v0, v1, s[0:1] offset:16 th:TH_ATOMIC_RETURN
+; GFX12-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 16
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v2, s2
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-SDAG-NEXT: flat_atomic_cond_sub_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[4:5]
+; GFX12-SDAG-NEXT: global_store_b32 v1, v0, s[4:5]
; GFX12-SDAG-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: global_atomic_cond_sub_rtn_u32:
@@ -152,8 +167,13 @@ define amdgpu_kernel void @global_atomic_cond_sub_rtn_u32(ptr addrspace(1) %addr
; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
; GFX12-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v0, v1, v0, s[0:1] offset:16 th:TH_ATOMIC_RETURN
+; GFX12-GISEL-NEXT: s_add_co_u32 s0, s0, 16
+; GFX12-GISEL-NEXT: s_add_co_ci_u32 s1, s1, 0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX12-GISEL-NEXT: flat_atomic_cond_sub_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX12-GISEL-NEXT: global_store_b32 v1, v0, s[4:5]
; GFX12-GISEL-NEXT: s_endpgm
@@ -172,7 +192,7 @@ define amdgpu_kernel void @ds_cond_sub_no_rtn_u32(ptr addrspace(3) %addr, i32 %i
; GFX12-SDAG-NEXT: s_add_co_i32 s0, s0, -16
; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
-; GFX12-SDAG-NEXT: ds_cond_sub_rtn_u32 v0, v0, v1
+; GFX12-SDAG-NEXT: ds_cond_sub_u32 v0, v1
; GFX12-SDAG-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: ds_cond_sub_no_rtn_u32:
@@ -182,7 +202,7 @@ define amdgpu_kernel void @ds_cond_sub_no_rtn_u32(ptr addrspace(3) %addr, i32 %i
; GFX12-GISEL-NEXT: s_add_co_u32 s0, s0, -16
; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
-; GFX12-GISEL-NEXT: ds_cond_sub_rtn_u32 v0, v0, v1
+; GFX12-GISEL-NEXT: ds_cond_sub_u32 v0, v1
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(3) %addr, i32 -4
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-usub_cond.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-usub_cond.ll
new file mode 100644
index 0000000000000..25bad218926f3
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-usub_cond.ll
@@ -0,0 +1,271 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11 %s
+
+
+; --------------------------------------------------------------------
+; i32
+; --------------------------------------------------------------------
+
+define i32 @buffer_fat_ptr_agent_atomic_usub_cond_ret_u32__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, i32 %val) #0 {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_usub_cond_ret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s16
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: buffer_atomic_cond_sub_u32 v0, v1, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_usub_cond_ret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
+; GFX11-NEXT: v_mov_b32_e32 v0, s16
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-NEXT: .LBB0_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_sub_nc_u32_e32 v0, v5, v2
+; GFX11-NEXT: v_cmp_ge_u32_e32 vcc_lo, v5, v2
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v0, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB0_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(7) %ptr, i32 256
+ %result = atomicrmw usub_cond ptr addrspace(7) %gep, i32 %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i32 %result
+}
+
+define void @buffer_fat_ptr_agent_atomic_usub_cond_noret_u32__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, i32 %val) #0 {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_usub_cond_noret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s16
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: buffer_atomic_cond_sub_u32 v0, v1, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_usub_cond_noret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s16
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 offen offset:1024
+; GFX11-NEXT: .LBB1_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_sub_nc_u32_e32 v1, v2, v0
+; GFX11-NEXT: v_cmp_ge_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc_lo
+; GFX11-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB1_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(7) %ptr, i32 256
+ %unused = atomicrmw usub_cond ptr addrspace(7) %gep, i32 %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
+define i32 @buffer_fat_ptr_agent_atomic_usub_cond_ret_u32__offset__amdgpu_no_remote_memory(ptr addrspace(7) inreg %ptr, i32 %val) #0 {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_usub_cond_ret_u32__offset__amdgpu_no_remote_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s16
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: buffer_atomic_cond_sub_u32 v0, v1, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_usub_cond_ret_u32__offset__amdgpu_no_remote_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
+; GFX11-NEXT: v_mov_b32_e32 v0, s16
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-NEXT: .LBB2_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_sub_nc_u32_e32 v0, v5, v2
+; GFX11-NEXT: v_cmp_ge_u32_e32 vcc_lo, v5, v2
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v0, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB2_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(7) %ptr, i32 256
+ %result = atomicrmw usub_cond ptr addrspace(7) %gep, i32 %val syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
+ ret i32 %result
+}
+
+define i32 @buffer_fat_ptr_agent_atomic_usub_cond_ret_u32__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(7) inreg %ptr, i32 %val) #0 {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_usub_cond_ret_u32__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s16
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: buffer_atomic_cond_sub_u32 v0, v1, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_usub_cond_ret_u32__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
+; GFX11-NEXT: v_mov_b32_e32 v0, s16
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_sub_nc_u32_e32 v0, v5, v2
+; GFX11-NEXT: v_cmp_ge_u32_e32 vcc_lo, v5, v2
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v0, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB3_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(7) %ptr, i32 256
+ %result = atomicrmw usub_cond ptr addrspace(7) %gep, i32 %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i32 %result
+}
+
+; --------------------------------------------------------------------
+; misc
+; --------------------------------------------------------------------
+
+define i32 @buffer_fat_ptr_system_atomic_usub_cond_ret_u32__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, i32 %val) #0 {
+; GFX12-LABEL: buffer_fat_ptr_system_atomic_usub_cond_ret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s16
+; GFX12-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: buffer_atomic_cond_sub_u32 v0, v1, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_system_atomic_usub_cond_ret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
+; GFX11-NEXT: v_mov_b32_e32 v0, s16
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_sub_nc_u32_e32 v0, v5, v2
+; GFX11-NEXT: v_cmp_ge_u32_e32 vcc_lo, v5, v2
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v0, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB4_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(7) %ptr, i32 256
+ %result = atomicrmw usub_cond ptr addrspace(7) %gep, i32 %val seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i32 %result
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{}
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-usub_sat.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-usub_sat.ll
new file mode 100644
index 0000000000000..836830cfa418a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-usub_sat.ll
@@ -0,0 +1,301 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9 %s
+
+; --------------------------------------------------------------------
+; i32
+; --------------------------------------------------------------------
+
+define i32 @buffer_fat_ptr_agent_atomic_usub_sat_ret_u32__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, i32 %val) #0 {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_usub_sat_ret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s16
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: buffer_atomic_sub_clamp_u32 v0, v1, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_usub_sat_ret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s16
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: buffer_atomic_csub_u32 v0, v1, s[0:3], 0 offen offset:1024 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: buffer_fat_ptr_agent_atomic_usub_sat_ret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: v_mov_b32_e32 v0, s20
+; GFX9-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s20
+; GFX9-NEXT: .LBB0_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_sub_u32_e64 v4, v5, v2 clamp
+; GFX9-NEXT: v_mov_b32_e32 v0, v4
+; GFX9-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB0_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(7) %ptr, i32 256
+ %result = atomicrmw usub_sat ptr addrspace(7) %gep, i32 %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i32 %result
+}
+
+define void @buffer_fat_ptr_agent_atomic_usub_sat_noret_u32__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, i32 %val) #0 {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_usub_sat_noret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s16
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: buffer_atomic_sub_clamp_u32 v0, v1, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_usub_sat_noret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s16
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: buffer_atomic_csub_u32 v0, v1, s[0:3], 0 offen offset:1024 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: buffer_fat_ptr_agent_atomic_usub_sat_noret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s20
+; GFX9-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s20
+; GFX9-NEXT: .LBB1_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_sub_u32_e64 v1, v2, v0 clamp
+; GFX9-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-NEXT: v_mov_b32_e32 v4, v1
+; GFX9-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB1_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(7) %ptr, i32 256
+ %unused = atomicrmw usub_sat ptr addrspace(7) %gep, i32 %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret void
+}
+
+define i32 @buffer_fat_ptr_agent_atomic_usub_sat_ret_u32__offset__amdgpu_no_remote_memory(ptr addrspace(7) inreg %ptr, i32 %val) #0 {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_usub_sat_ret_u32__offset__amdgpu_no_remote_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s16
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: buffer_atomic_sub_clamp_u32 v0, v1, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_usub_sat_ret_u32__offset__amdgpu_no_remote_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s16
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: buffer_atomic_csub_u32 v0, v1, s[0:3], 0 offen offset:1024 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: buffer_fat_ptr_agent_atomic_usub_sat_ret_u32__offset__amdgpu_no_remote_memory:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: v_mov_b32_e32 v0, s20
+; GFX9-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s20
+; GFX9-NEXT: .LBB2_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_sub_u32_e64 v4, v5, v2 clamp
+; GFX9-NEXT: v_mov_b32_e32 v0, v4
+; GFX9-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB2_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(7) %ptr, i32 256
+ %result = atomicrmw usub_sat ptr addrspace(7) %gep, i32 %val syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
+ ret i32 %result
+}
+
+define i32 @buffer_fat_ptr_agent_atomic_usub_sat_ret_u32__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(7) inreg %ptr, i32 %val) #0 {
+; GFX12-LABEL: buffer_fat_ptr_agent_atomic_usub_sat_ret_u32__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s16
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: buffer_atomic_sub_clamp_u32 v0, v1, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_agent_atomic_usub_sat_ret_u32__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s16
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: buffer_atomic_csub_u32 v0, v1, s[0:3], 0 offen offset:1024 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: buffer_fat_ptr_agent_atomic_usub_sat_ret_u32__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: v_mov_b32_e32 v0, s20
+; GFX9-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s20
+; GFX9-NEXT: .LBB3_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_sub_u32_e64 v4, v5, v2 clamp
+; GFX9-NEXT: v_mov_b32_e32 v0, v4
+; GFX9-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB3_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(7) %ptr, i32 256
+ %result = atomicrmw usub_sat ptr addrspace(7) %gep, i32 %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i32 %result
+}
+
+; --------------------------------------------------------------------
+; misc
+; --------------------------------------------------------------------
+
+define i32 @buffer_fat_ptr_system_atomic_usub_sat_ret_u32__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, i32 %val) #0 {
+; GFX12-LABEL: buffer_fat_ptr_system_atomic_usub_sat_ret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s16
+; GFX12-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: buffer_atomic_sub_clamp_u32 v0, v1, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: buffer_fat_ptr_system_atomic_usub_sat_ret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s16
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: buffer_atomic_csub_u32 v0, v1, s[0:3], 0 offen offset:1024 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: buffer_fat_ptr_system_atomic_usub_sat_ret_u32__offset__amdgpu_no_fine_grained_memory:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: v_mov_b32_e32 v0, s20
+; GFX9-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s20
+; GFX9-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_sub_u32_e64 v4, v5, v2 clamp
+; GFX9-NEXT: v_mov_b32_e32 v0, v4
+; GFX9-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB4_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(7) %ptr, i32 256
+ %result = atomicrmw usub_sat ptr addrspace(7) %gep, i32 %val seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i32 %result
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{}
diff --git a/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx1030.ll b/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx1030.ll
index 81f768f303ca1..8e6e19f4de82a 100644
--- a/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx1030.ll
+++ b/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx1030.ll
@@ -7,12 +7,12 @@
define amdgpu_kernel void @test_sink_small_offset_global_atomic_csub_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; OPT-LABEL: @test_sink_small_offset_global_atomic_csub_i32(
; OPT-NEXT: entry:
-; OPT-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #[[ATTR3:[0-9]+]]
+; OPT-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #[[ATTR2:[0-9]+]]
; OPT-NEXT: [[CMP:%.*]] = icmp eq i32 [[TID]], 0
; OPT-NEXT: br i1 [[CMP]], label [[ENDIF:%.*]], label [[IF:%.*]]
; OPT: if:
; OPT-NEXT: [[IN_GEP:%.*]] = getelementptr i32, ptr addrspace(1) [[IN:%.*]], i32 7
-; OPT-NEXT: [[VAL:%.*]] = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) [[IN_GEP]], i32 2)
+; OPT-NEXT: [[VAL:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[IN_GEP]], i32 2 seq_cst, align 4
; OPT-NEXT: br label [[ENDIF]]
; OPT: endif:
; OPT-NEXT: [[X:%.*]] = phi i32 [ [[VAL]], [[IF]] ], [ 0, [[ENTRY:%.*]] ]
@@ -35,10 +35,13 @@ define amdgpu_kernel void @test_sink_small_offset_global_atomic_csub_i32(ptr add
; GCN-NEXT: v_mov_b32_e32 v1, 2
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_atomic_csub v0, v0, v1, s[2:3] offset:28 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_gl1_inv
+; GCN-NEXT: buffer_gl0_inv
; GCN-NEXT: .LBB0_2: ; %endif
; GCN-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GCN-NEXT: v_mov_b32_e32 v1, 0x3d0800
-; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_store_dword v1, v0, s[0:1] offset:252
; GCN-NEXT: s_endpgm
entry:
@@ -48,7 +51,7 @@ entry:
if:
%in.gep = getelementptr i32, ptr addrspace(1) %in, i32 7
- %val = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %in.gep, i32 2)
+ %val = atomicrmw usub_sat ptr addrspace(1) %in.gep, i32 2 seq_cst, !amdgpu.no.remote.memory !0
br label %endif
endif:
@@ -61,9 +64,10 @@ done:
ret void
}
-declare i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) nocapture, i32) #0
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
attributes #0 = { argmemonly nounwind }
attributes #1 = { nounwind readnone willreturn }
attributes #2 = { argmemonly nounwind willreturn }
+
+!0 = !{}
diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx1030.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx1030.ll
index 79de55eb63bf8..8dff14f1b41a0 100644
--- a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx1030.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx1030.ll
@@ -10,10 +10,12 @@ define amdgpu_ps float @global_csub_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GCN: ; %bb.0:
; GCN-NEXT: global_atomic_csub v0, v0, v1, s[2:3] glc
; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_gl1_inv
+; GCN-NEXT: buffer_gl0_inv
; GCN-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep0, i32 %data)
+ %rtn = atomicrmw usub_sat ptr addrspace(1) %gep0, i32 %data seq_cst, !amdgpu.no.remote.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -23,11 +25,13 @@ define amdgpu_ps float @global_csub_saddr_i32_rtn_neg128(ptr addrspace(1) inreg
; GCN: ; %bb.0:
; GCN-NEXT: global_atomic_csub v0, v0, v1, s[2:3] offset:-128 glc
; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_gl1_inv
+; GCN-NEXT: buffer_gl0_inv
; GCN-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep1, i32 %data)
+ %rtn = atomicrmw usub_sat ptr addrspace(1) %gep1, i32 %data seq_cst, !amdgpu.no.remote.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -36,10 +40,13 @@ define amdgpu_ps void @global_csub_saddr_i32_nortn(ptr addrspace(1) inreg %sbase
; GCN-LABEL: global_csub_saddr_i32_nortn:
; GCN: ; %bb.0:
; GCN-NEXT: global_atomic_csub v0, v0, v1, s[2:3] glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_gl1_inv
+; GCN-NEXT: buffer_gl0_inv
; GCN-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep0, i32 %data)
+ %unused = atomicrmw usub_sat ptr addrspace(1) %gep0, i32 %data seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -47,14 +54,19 @@ define amdgpu_ps void @global_csub_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GCN-LABEL: global_csub_saddr_i32_nortn_neg128:
; GCN: ; %bb.0:
; GCN-NEXT: global_atomic_csub v0, v0, v1, s[2:3] offset:-128 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_gl1_inv
+; GCN-NEXT: buffer_gl0_inv
; GCN-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep1, i32 %data)
+ %unused = atomicrmw usub_sat ptr addrspace(1) %gep1, i32 %data seq_cst, !amdgpu.no.remote.memory !0
ret void
}
declare i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) nocapture, i32) #0
attributes #0 = { argmemonly nounwind willreturn }
+
+!0 = !{}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.csub.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.csub.ll
index b05f141b76009..49db2eec83ed9 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.csub.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.csub.ll
@@ -2,14 +2,12 @@
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1031 | FileCheck %s -check-prefixes=GCN,PREGFX12
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1200 | FileCheck %s -check-prefixes=GCN,GFX12PLUS
-declare i32 @llvm.amdgcn.global.atomic.csub(ptr addrspace(1), i32)
-
; GCN-LABEL: {{^}}global_atomic_csub_rtn:
; PREGFX12: global_atomic_csub v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9:]+}}, s{{\[[0-9]+:[0-9]+\]}} glc
; GFX12PLUS: global_atomic_sub_clamp_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} th:TH_ATOMIC_RETURN
define amdgpu_kernel void @global_atomic_csub_rtn(ptr addrspace(1) %ptr, i32 %data) {
main_body:
- %ret = call i32 @llvm.amdgcn.global.atomic.csub(ptr addrspace(1) %ptr, i32 %data)
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -18,7 +16,7 @@ main_body:
; GFX12PLUS: global_atomic_sub_clamp_u32 v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}
define amdgpu_kernel void @global_atomic_csub_no_rtn(ptr addrspace(1) %ptr, i32 %data) #0 {
main_body:
- %ret = call i32 @llvm.amdgcn.global.atomic.csub(ptr addrspace(1) %ptr, i32 %data)
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -28,7 +26,7 @@ main_body:
define amdgpu_kernel void @global_atomic_csub_off4_rtn(ptr addrspace(1) %ptr, i32 %data) {
main_body:
%p = getelementptr i32, ptr addrspace(1) %ptr, i64 1
- %ret = call i32 @llvm.amdgcn.global.atomic.csub(ptr addrspace(1) %p, i32 %data)
+ %ret = atomicrmw usub_sat ptr addrspace(1) %p, i32 %data seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -38,8 +36,10 @@ main_body:
define amdgpu_kernel void @global_atomic_csub_off4_no_rtn(ptr addrspace(1) %ptr, i32 %data) #0 {
main_body:
%p = getelementptr i32, ptr addrspace(1) %ptr, i64 1
- %ret = call i32 @llvm.amdgcn.global.atomic.csub(ptr addrspace(1) %p, i32 %data)
+ %ret = atomicrmw usub_sat ptr addrspace(1) %p, i32 %data seq_cst, !amdgpu.no.remote.memory !0
ret void
}
attributes #0 = { "target-features"="+atomic-csub-no-rtn-insts" }
+
+!0 = !{}
diff --git a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
index 4ea58a5890d35..b43454840ee16 100644
--- a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
@@ -630,3 +630,49 @@ define i32 @atomicrmw_dec_private_i32(ptr addrspace(5) %ptr) {
%result = atomicrmw udec_wrap ptr addrspace(5) %ptr, i32 4 seq_cst
ret i32 %result
}
+
+define i32 @atomicrmw_usub_cond_private_i32(ptr addrspace(5) %ptr) {
+; IR-LABEL: @atomicrmw_usub_cond_private_i32(
+; IR-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(5) [[PTR:%.*]], align 4
+; IR-NEXT: [[TMP2:%.*]] = icmp uge i32 [[TMP1]], 4
+; IR-NEXT: [[TMP3:%.*]] = sub i32 [[TMP1]], 4
+; IR-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[TMP1]]
+; IR-NEXT: store i32 [[NEW]], ptr addrspace(5) [[PTR]], align 4
+; IR-NEXT: ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_usub_cond_private_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_add_i32_e32 v2, vcc, -4, v1
+; GCN-NEXT: v_min_u32_e32 v2, v2, v1
+; GCN-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT: v_mov_b32_e32 v0, v1
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw usub_cond ptr addrspace(5) %ptr, i32 4 seq_cst
+ ret i32 %result
+}
+
+define i32 @atomicrmw_usub_sat_private_i32(ptr addrspace(5) %ptr) {
+; IR-LABEL: @atomicrmw_usub_sat_private_i32(
+; IR-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(5) [[PTR:%.*]], align 4
+; IR-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[TMP1]], i32 4)
+; IR-NEXT: store i32 [[NEW]], ptr addrspace(5) [[PTR]], align 4
+; IR-NEXT: ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_usub_sat_private_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_max_u32_e32 v2, 4, v1
+; GCN-NEXT: v_add_i32_e32 v2, vcc, -4, v2
+; GCN-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT: v_mov_b32_e32 v0, v1
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw usub_sat ptr addrspace(5) %ptr, i32 4 seq_cst
+ ret i32 %result
+}
diff --git a/llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll b/llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll
index d0377b426cc10..a0b53fb371dcf 100644
--- a/llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll
@@ -12,7 +12,7 @@ define i32 @shl_base_atomicrmw_global_atomic_csub_ptr(ptr addrspace(1) %out, ptr
%cast = ptrtoint ptr addrspace(1) %arrayidx0 to i64
%shl = shl i64 %cast, 2
%castback = inttoptr i64 %shl to ptr addrspace(1)
- %val = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %castback, i32 43)
+ %val = atomicrmw usub_sat ptr addrspace(1) %castback, i32 43 seq_cst, !amdgpu.no.remote.memory !0
store volatile i64 %cast, ptr addrspace(1) %extra.use, align 4
ret i32 %val
}
@@ -20,3 +20,5 @@ define i32 @shl_base_atomicrmw_global_atomic_csub_ptr(ptr addrspace(1) %out, ptr
declare i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) nocapture, i32) #0
attributes #0 = { argmemonly nounwind }
+
+!0 = !{}
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
index d821caa5b1c6b..3f55f037aef29 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll
@@ -884,3 +884,236 @@ define bfloat @test_atomicrmw_xchg_bf16_flat_system_align4(ptr %ptr, bfloat %val
%res = atomicrmw xchg ptr %ptr, bfloat %value seq_cst, align 4
ret bfloat %res
}
+
+define i16 @test_atomicrmw_usub_cond_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_global_system(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i16 [[TMP5]], i16 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i16 %value seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_cond_i16_global_system_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_global_system_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i16 [[TMP3]], i16 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i16 %value seq_cst, align 4
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_cond_i16_flat_system(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_flat_system(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i16 [[TMP5]], i16 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr %ptr, i16 %value seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_cond_i16_flat_system_align4(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_flat_system_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i16 [[TMP3]], i16 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_cond ptr %ptr, i16 %value seq_cst, align 4
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_sat_i16_global_system(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_global_system(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[NEW:%.*]] = call i16 @llvm.usub.sat.i16(i16 [[EXTRACTED]], i16 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i16 %value seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_sat_i16_global_system_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_global_system_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[NEW:%.*]] = call i16 @llvm.usub.sat.i16(i16 [[EXTRACTED]], i16 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i16 %value seq_cst, align 4
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_sat_i16_flat_system(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_flat_system(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[NEW:%.*]] = call i16 @llvm.usub.sat.i16(i16 [[EXTRACTED]], i16 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr %ptr, i16 %value seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_sat_i16_flat_system_align4(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_flat_system_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[NEW:%.*]] = call i16 @llvm.usub.sat.i16(i16 [[EXTRACTED]], i16 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_sat ptr %ptr, i16 %value seq_cst, align 4
+ ret i16 %res
+}
+
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
index 72fc4f468543a..b2ba5346318f4 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
@@ -140,30 +140,30 @@ define i16 @test_atomicrmw_sub_i16_global_agent(ptr addrspace(1) %ptr, i16 %valu
}
define i16 @test_atomicrmw_and_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent(
-; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
-; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
-; GCN: atomicrmw.start:
-; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
-; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
-; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
-; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
-; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; GCN: atomicrmw.end:
-; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
@@ -180,27 +180,26 @@ define i16 @test_atomicrmw_and_i16_global_agent(ptr addrspace(1) %ptr, i16 %valu
; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4(
-; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
-; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
-; GCN: atomicrmw.start:
-; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
-; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
-; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; GCN: atomicrmw.end:
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; CHECK-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4(
; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
@@ -208,37 +207,36 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4(ptr addrspace(1) %ptr, i1
; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
ret i16 %res
}
; Drop unknown metadata and noundef
define i16 @test_atomicrmw_and_i16_global_agent_drop_md(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_drop_md(
-; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
-; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
-; GCN: atomicrmw.start:
-; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
-; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
-; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
-; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
-; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; GCN: atomicrmw.end:
-; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_drop_md(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent_drop_md(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
@@ -255,28 +253,27 @@ define i16 @test_atomicrmw_and_i16_global_agent_drop_md(ptr addrspace(1) %ptr, i
; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, !noundef !0, !some.unknown.md !0
ret i16 %res
}
; Drop unknown metadata
define i16 @test_atomicrmw_and_i16_global_agent_align4_drop_md(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4_drop_md(
-; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
-; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
-; GCN: atomicrmw.start:
-; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
-; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
-; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; GCN: atomicrmw.end:
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_drop_md(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; CHECK-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4_drop_md(
; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
@@ -284,37 +281,36 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4_drop_md(ptr addrspace(1)
; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !noundef !0, !some.unknown.md !0
ret i16 %res
}
; Drop noundef, preserve mmra
define i16 @test_atomicrmw_and_i16_global_agent_preserve_mmra(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_preserve_mmra(
-; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4, !mmra [[META0:![0-9]+]]
-; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
-; GCN: atomicrmw.start:
-; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
-; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !mmra [[META0]]
-; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
-; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
-; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; GCN: atomicrmw.end:
-; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_preserve_mmra(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4, !mmra [[META0:![0-9]+]]
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !mmra [[META0]]
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent_preserve_mmra(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
@@ -331,28 +327,27 @@ define i16 @test_atomicrmw_and_i16_global_agent_preserve_mmra(ptr addrspace(1) %
; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, !noundef !0, !mmra !1
ret i16 %res
}
; Drop noundef, preserve mmra
define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_mmra(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_mmra(
-; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4, !mmra [[META0]]
-; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
-; GCN: atomicrmw.start:
-; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
-; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !mmra [[META0]]
-; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; GCN: atomicrmw.end:
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_mmra(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4, !mmra [[META0]]
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; CHECK-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !mmra [[META0]]
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_mmra(
; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
@@ -360,27 +355,26 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_mmra(ptr addrspa
; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !mmra [[META0]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !noundef !0, !mmra !1
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_alias_scope(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_alias_scope(
-; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
-; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
-; GCN: atomicrmw.start:
-; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
-; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
-; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; GCN: atomicrmw.end:
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_alias_scope(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; CHECK-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !alias.scope [[META1:![0-9]+]]
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_alias_scope(
; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
@@ -388,27 +382,26 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_alias_scope(ptr
; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !alias.scope [[META1:![0-9]+]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !alias.scope !2
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_noalias(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_noalias(
-; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
-; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
-; GCN: atomicrmw.start:
-; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
-; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
-; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; GCN: atomicrmw.end:
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_noalias(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; CHECK-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !noalias [[META1]]
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_noalias(
; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
@@ -416,27 +409,26 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_noalias(ptr addr
; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !noalias [[META1]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !noalias !2
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa_struct(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa_struct(
-; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
-; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
-; GCN: atomicrmw.start:
-; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
-; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
-; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; GCN: atomicrmw.end:
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa_struct(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; CHECK-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !tbaa.struct [[TBAA_STRUCT4:![0-9]+]]
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa_struct(
; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
@@ -444,27 +436,26 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa_struct(ptr
; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !tbaa.struct [[TBAA_STRUCT4:![0-9]+]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !tbaa.struct !5
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa(
-; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
-; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
-; GCN: atomicrmw.start:
-; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
-; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
-; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; GCN: atomicrmw.end:
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; CHECK-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !tbaa [[TBAA5:![0-9]+]]
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa(
; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
@@ -472,27 +463,26 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa(ptr addrspa
; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !tbaa [[TBAA5:![0-9]+]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !tbaa !6
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent__amdgpu_no_remote_memory(
-; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; GCN-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META1:![0-9]+]]
-; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent__amdgpu_no_remote_memory(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META8:![0-9]+]]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent__amdgpu_no_remote_memory(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
@@ -509,18 +499,17 @@ define i16 @test_atomicrmw_and_i16_global_agent__amdgpu_no_remote_memory(ptr add
; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_remote_memory(
-; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; GCN-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META1]]
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_remote_memory(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META8]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_remote_memory(
; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
@@ -528,27 +517,26 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_remote_memory(
; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META8]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent__amdgpu_no_fine_grained_memory(
-; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; GCN-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META1]]
-; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent__amdgpu_no_fine_grained_memory(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META8]]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent__amdgpu_no_fine_grained_memory(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
@@ -565,18 +553,17 @@ define i16 @test_atomicrmw_and_i16_global_agent__amdgpu_no_fine_grained_memory(p
; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_fine_grained_memory(
-; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; GCN-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META1]]
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_fine_grained_memory(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META8]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_fine_grained_memory(
; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
@@ -584,7 +571,6 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_fine_grained_m
; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META8]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
ret i16 %res
}
@@ -623,29 +609,29 @@ define i16 @test_atomicrmw_nand_i16_global_agent(ptr addrspace(1) %ptr, i16 %val
}
define i16 @test_atomicrmw_or_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_or_i16_global_agent(
-; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
-; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
-; GCN: atomicrmw.start:
-; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; GCN-NEXT: [[NEW:%.*]] = or i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
-; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
-; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
-; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
-; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; GCN: atomicrmw.end:
-; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_or_i16_global_agent(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = or i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_or_i16_global_agent(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
@@ -661,35 +647,34 @@ define i16 @test_atomicrmw_or_i16_global_agent(ptr addrspace(1) %ptr, i16 %value
; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw or ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
ret i16 %res
}
define i16 @test_atomicrmw_xor_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
-; GCN-LABEL: @test_atomicrmw_xor_i16_global_agent(
-; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
-; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
-; GCN: atomicrmw.start:
-; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; GCN-NEXT: [[NEW:%.*]] = xor i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
-; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
-; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
-; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
-; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; GCN: atomicrmw.end:
-; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
-; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; GCN-NEXT: ret i16 [[EXTRACTED]]
+; CHECK-LABEL: @test_atomicrmw_xor_i16_global_agent(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = xor i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
; R600-LABEL: @test_atomicrmw_xor_i16_global_agent(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
@@ -705,7 +690,6 @@ define i16 @test_atomicrmw_xor_i16_global_agent(ptr addrspace(1) %ptr, i16 %valu
; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; R600-NEXT: ret i16 [[EXTRACTED]]
-;
%res = atomicrmw xor ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
ret i16 %res
}
@@ -1662,7 +1646,7 @@ define i16 @test_atomicrmw_sub_i16_global_agent__amdgpu_no_remote_memory(ptr add
; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
; CHECK-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META8]]
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -1694,7 +1678,7 @@ define i16 @test_atomicrmw_sub_i16_global_agent__amdgpu_no_fine_grained_memory(p
; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
; CHECK-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META8]]
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -1726,7 +1710,7 @@ define i16 @test_atomicrmw_sub_i16_global_agent__amdgpu_no_fine_grained_memory__
; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
; CHECK-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META8]], !amdgpu.no.fine.grained.memory [[META8]]
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -1739,6 +1723,352 @@ define i16 @test_atomicrmw_sub_i16_global_agent__amdgpu_no_fine_grained_memory__
ret i16 %res
}
+define i16 @test_atomicrmw_usub_cond_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_global_agent(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i16 [[TMP5]], i16 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_cond_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_global_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i16 [[TMP3]], i16 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_cond_i16_local(ptr addrspace(3) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_local(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[TMP2]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(3) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i16 [[TMP5]], i16 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(3) %ptr, i16 %value syncscope("agent") seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_cond_i16_local_align4(ptr addrspace(3) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_local_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(3) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i16 [[TMP3]], i16 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(3) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(3) %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_cond_i16_flat_agent(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_flat_agent(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i16 [[TMP5]], i16 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr %ptr, i16 %value syncscope("agent") seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_cond_i16_flat_agent_align4(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_flat_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i16 [[TMP3]], i16 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_cond ptr %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_sat_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_global_agent(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[NEW:%.*]] = call i16 @llvm.usub.sat.i16(i16 [[EXTRACTED]], i16 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_sat_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_global_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[NEW:%.*]] = call i16 @llvm.usub.sat.i16(i16 [[EXTRACTED]], i16 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_sat_i16_local(ptr addrspace(3) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_local(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[TMP2]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(3) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[NEW:%.*]] = call i16 @llvm.usub.sat.i16(i16 [[EXTRACTED]], i16 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(3) %ptr, i16 %value seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_sat_i16_local_align4(ptr addrspace(3) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_local_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(3) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[NEW:%.*]] = call i16 @llvm.usub.sat.i16(i16 [[EXTRACTED]], i16 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(3) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(3) %ptr, i16 %value seq_cst, align 4
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_sat_i16_flat_agent(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_flat_agent(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[NEW:%.*]] = call i16 @llvm.usub.sat.i16(i16 [[EXTRACTED]], i16 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr %ptr, i16 %value syncscope("agent") seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_usub_sat_i16_flat_agent_align4(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_flat_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[NEW:%.*]] = call i16 @llvm.usub.sat.i16(i16 [[EXTRACTED]], i16 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_sat ptr %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ ret i16 %res
+}
+
!0 = !{}
!1 = !{!"foo", !"bar"}
!2 = !{!3}
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-agent.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-agent.ll
index 1440045d11e2d..2b1c9028213e0 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-agent.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-agent.ll
@@ -166,10 +166,10 @@ define i32 @test_atomicrmw_sub_i32_global_agent(ptr addrspace(1) %ptr, i32 %valu
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i32 [[NEWLOADED]]
;
-; GFX940-LABEL: define i32 @test_atomicrmw_sub_i32_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; GFX940-NEXT: ret i32 [[RES]]
+; GFX942-LABEL: define i32 @test_atomicrmw_sub_i32_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX942-NEXT: ret i32 [[RES]]
;
; GFX10-LABEL: define i32 @test_atomicrmw_sub_i32_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -204,6 +204,10 @@ define i32 @test_atomicrmw_sub_i32_global_agent(ptr addrspace(1) %ptr, i32 %valu
; GFX12-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
; GFX12-NEXT: ret i32 [[RES]]
;
+; GFX940-LABEL: define i32 @test_atomicrmw_sub_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
%res = atomicrmw sub ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
}
@@ -300,10 +304,10 @@ define i32 @test_atomicrmw_and_i32_global_agent(ptr addrspace(1) %ptr, i32 %valu
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i32 [[NEWLOADED]]
;
-; GFX940-LABEL: define i32 @test_atomicrmw_and_i32_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; GFX940-NEXT: ret i32 [[RES]]
+; GFX942-LABEL: define i32 @test_atomicrmw_and_i32_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX942-NEXT: ret i32 [[RES]]
;
; GFX10-LABEL: define i32 @test_atomicrmw_and_i32_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -338,6 +342,10 @@ define i32 @test_atomicrmw_and_i32_global_agent(ptr addrspace(1) %ptr, i32 %valu
; GFX12-NEXT: [[RES:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
; GFX12-NEXT: ret i32 [[RES]]
;
+; GFX940-LABEL: define i32 @test_atomicrmw_and_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
%res = atomicrmw and ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
}
@@ -519,10 +527,10 @@ define i32 @test_atomicrmw_or_i32_global_agent(ptr addrspace(1) %ptr, i32 %value
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i32 [[NEWLOADED]]
;
-; GFX940-LABEL: define i32 @test_atomicrmw_or_i32_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; GFX940-NEXT: ret i32 [[RES]]
+; GFX942-LABEL: define i32 @test_atomicrmw_or_i32_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX942-NEXT: ret i32 [[RES]]
;
; GFX10-LABEL: define i32 @test_atomicrmw_or_i32_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -557,6 +565,10 @@ define i32 @test_atomicrmw_or_i32_global_agent(ptr addrspace(1) %ptr, i32 %value
; GFX12-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
; GFX12-NEXT: ret i32 [[RES]]
;
+; GFX940-LABEL: define i32 @test_atomicrmw_or_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
%res = atomicrmw or ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
}
@@ -653,10 +665,10 @@ define i32 @test_atomicrmw_xor_i32_global_agent(ptr addrspace(1) %ptr, i32 %valu
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i32 [[NEWLOADED]]
;
-; GFX940-LABEL: define i32 @test_atomicrmw_xor_i32_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; GFX940-NEXT: ret i32 [[RES]]
+; GFX942-LABEL: define i32 @test_atomicrmw_xor_i32_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX942-NEXT: ret i32 [[RES]]
;
; GFX10-LABEL: define i32 @test_atomicrmw_xor_i32_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -691,6 +703,10 @@ define i32 @test_atomicrmw_xor_i32_global_agent(ptr addrspace(1) %ptr, i32 %valu
; GFX12-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
; GFX12-NEXT: ret i32 [[RES]]
;
+; GFX940-LABEL: define i32 @test_atomicrmw_xor_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
%res = atomicrmw xor ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
}
@@ -791,10 +807,10 @@ define i32 @test_atomicrmw_max_i32_global_agent(ptr addrspace(1) %ptr, i32 %valu
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i32 [[NEWLOADED]]
;
-; GFX940-LABEL: define i32 @test_atomicrmw_max_i32_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; GFX940-NEXT: ret i32 [[RES]]
+; GFX942-LABEL: define i32 @test_atomicrmw_max_i32_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX942-NEXT: ret i32 [[RES]]
;
; GFX10-LABEL: define i32 @test_atomicrmw_max_i32_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -831,6 +847,10 @@ define i32 @test_atomicrmw_max_i32_global_agent(ptr addrspace(1) %ptr, i32 %valu
; GFX12-NEXT: [[RES:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
; GFX12-NEXT: ret i32 [[RES]]
;
+; GFX940-LABEL: define i32 @test_atomicrmw_max_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
%res = atomicrmw max ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
}
@@ -931,10 +951,10 @@ define i32 @test_atomicrmw_min_i32_global_agent(ptr addrspace(1) %ptr, i32 %valu
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i32 [[NEWLOADED]]
;
-; GFX940-LABEL: define i32 @test_atomicrmw_min_i32_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; GFX940-NEXT: ret i32 [[RES]]
+; GFX942-LABEL: define i32 @test_atomicrmw_min_i32_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX942-NEXT: ret i32 [[RES]]
;
; GFX10-LABEL: define i32 @test_atomicrmw_min_i32_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -971,6 +991,10 @@ define i32 @test_atomicrmw_min_i32_global_agent(ptr addrspace(1) %ptr, i32 %valu
; GFX12-NEXT: [[RES:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
; GFX12-NEXT: ret i32 [[RES]]
;
+; GFX940-LABEL: define i32 @test_atomicrmw_min_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
%res = atomicrmw min ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
}
@@ -1071,10 +1095,10 @@ define i32 @test_atomicrmw_umax_i32_global_agent(ptr addrspace(1) %ptr, i32 %val
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i32 [[NEWLOADED]]
;
-; GFX940-LABEL: define i32 @test_atomicrmw_umax_i32_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; GFX940-NEXT: ret i32 [[RES]]
+; GFX942-LABEL: define i32 @test_atomicrmw_umax_i32_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX942-NEXT: ret i32 [[RES]]
;
; GFX10-LABEL: define i32 @test_atomicrmw_umax_i32_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -1111,6 +1135,10 @@ define i32 @test_atomicrmw_umax_i32_global_agent(ptr addrspace(1) %ptr, i32 %val
; GFX12-NEXT: [[RES:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
; GFX12-NEXT: ret i32 [[RES]]
;
+; GFX940-LABEL: define i32 @test_atomicrmw_umax_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
%res = atomicrmw umax ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
}
@@ -1211,10 +1239,10 @@ define i32 @test_atomicrmw_umin_i32_global_agent(ptr addrspace(1) %ptr, i32 %val
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i32 [[NEWLOADED]]
;
-; GFX940-LABEL: define i32 @test_atomicrmw_umin_i32_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; GFX940-NEXT: ret i32 [[RES]]
+; GFX942-LABEL: define i32 @test_atomicrmw_umin_i32_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX942-NEXT: ret i32 [[RES]]
;
; GFX10-LABEL: define i32 @test_atomicrmw_umin_i32_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -1251,6 +1279,10 @@ define i32 @test_atomicrmw_umin_i32_global_agent(ptr addrspace(1) %ptr, i32 %val
; GFX12-NEXT: [[RES:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
; GFX12-NEXT: ret i32 [[RES]]
;
+; GFX940-LABEL: define i32 @test_atomicrmw_umin_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
%res = atomicrmw umin ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
}
@@ -1355,10 +1387,10 @@ define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(ptr addrspace(1) %ptr, i32
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i32 [[NEWLOADED]]
;
-; GFX940-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; GFX940-NEXT: ret i32 [[RES]]
+; GFX942-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX942-NEXT: ret i32 [[RES]]
;
; GFX10-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -1397,6 +1429,10 @@ define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(ptr addrspace(1) %ptr, i32
; GFX12-NEXT: [[RES:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
; GFX12-NEXT: ret i32 [[RES]]
;
+; GFX940-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
%res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
}
@@ -1509,10 +1545,10 @@ define i32 @test_atomicrmw_udec_wrap_i32_global_agent(ptr addrspace(1) %ptr, i32
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i32 [[NEWLOADED]]
;
-; GFX940-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; GFX940-NEXT: ret i32 [[RES]]
+; GFX942-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX942-NEXT: ret i32 [[RES]]
;
; GFX10-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -1555,6 +1591,10 @@ define i32 @test_atomicrmw_udec_wrap_i32_global_agent(ptr addrspace(1) %ptr, i32
; GFX12-NEXT: [[RES:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
; GFX12-NEXT: ret i32 [[RES]]
;
+; GFX940-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
%res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
}
@@ -1589,6 +1629,882 @@ define i32 @test_atomicrmw_udec_wrap_i32_global_agent__amdgpu_no_fine_grained_me
ret i32 %res
}
+;---------------------------------------------------------------------
+; atomicrmw usub_cond
+;---------------------------------------------------------------------
+
+; expansion is necessary, operation not supported over PCIe
+define i32 @test_atomicrmw_usub_cond_i32_global_agent(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX803-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX906-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX908-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX90A-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX942-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX10-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX11-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_cond ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
+ ret i32 %res
+}
+
+define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX803-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX906-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX908-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX90A-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX942-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX10-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX11-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_cond ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i32 %res
+}
+
+define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_remote_memory(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX803-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_remote_memory(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX906-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_remote_memory(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX908-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_remote_memory(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX90A-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_remote_memory(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX942-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_remote_memory(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX10-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_remote_memory(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX11-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_remote_memory(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_cond ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
+ ret i32 %res
+}
+
+define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX803-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX906-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX908-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX90A-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX942-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX10-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX11-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_cond ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i32 %res
+}
+
+;---------------------------------------------------------------------
+; atomicrmw usub_sat
+;---------------------------------------------------------------------
+
+; expansion is necessary, operation not supported over PCIe
+define i32 @test_atomicrmw_usub_sat_i32_global_agent(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX942-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[RES:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX10-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX10-NEXT: [[RES]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[RES]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[RES:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX11-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX11-NEXT: [[RES]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[RES]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
+ ret i32 %res
+}
+
+define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX942-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX10-NEXT: ret i32 [[RES]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX11-NEXT: ret i32 [[RES]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i32 %res
+}
+
+define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_remote_memory(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_remote_memory(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_remote_memory(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_remote_memory(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_remote_memory(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX942-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_remote_memory(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX10-NEXT: ret i32 [[RES]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_remote_memory(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX11-NEXT: ret i32 [[RES]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_remote_memory(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
+ ret i32 %res
+}
+
+define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX942-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX10-NEXT: ret i32 [[RES]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX11-NEXT: ret i32 [[RES]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i32 %res
+}
+
!0 = !{}
;.
; GFX803: [[META0]] = !{}
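For readers skimming the generated checks: the pattern they verify is the standard AtomicExpand cmpxchg loop. Below is a minimal, hand-written sketch of what atomicrmw usub_cond lowers to on a target without native support; value and block names are illustrative rather than the exact ones the pass emits, and the system-scope tests in the next file check the same shape minus the syncscope("agent").

define i32 @usub_cond_expanded_sketch(ptr addrspace(1) %ptr, i32 %value) {
entry:
  %init = load i32, ptr addrspace(1) %ptr, align 4
  br label %atomicrmw.start

atomicrmw.start:
  ; Value currently believed to be in memory (initial load or result of a failed cmpxchg).
  %loaded = phi i32 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
  ; usub_cond: subtract only if it does not underflow, otherwise keep the old value.
  %no.underflow = icmp uge i32 %loaded, %value
  %sub = sub i32 %loaded, %value
  %new = select i1 %no.underflow, i32 %sub, i32 %loaded
  ; Attempt to publish the update; loop if another thread raced us.
  %pair = cmpxchg ptr addrspace(1) %ptr, i32 %loaded, i32 %new syncscope("agent") seq_cst seq_cst, align 4
  %success = extractvalue { i32, i1 } %pair, 1
  %newloaded = extractvalue { i32, i1 } %pair, 0
  br i1 %success, label %atomicrmw.end, label %atomicrmw.start

atomicrmw.end:
  ; atomicrmw returns the pre-update memory value.
  ret i32 %newloaded
}

The usub_sat expansion is identical except that %new is computed with a call to @llvm.usub.sat.i32(i32 %loaded, i32 %value); the GFX12 checks above keep the plain atomicrmw form instead of expanding it.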
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-system.ll
index b8e7c3eb4673a..cd6aa62e04b5b 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-system.ll
@@ -739,6 +739,692 @@ define i32 @test_atomicrmw_udec_wrap_i32_global_system__amdgpu_no_fine_grained_m
ret i32 %res
}
+;---------------------------------------------------------------------
+; atomicrmw usub_cond
+;---------------------------------------------------------------------
+
+; expansion is necessary, operation not supported over PCIe
+define i32 @test_atomicrmw_usub_cond_i32_global_system(ptr addrspace(1) %ptr, i32 %value) {
+; COMMON-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %value seq_cst
+ ret i32 %res
+}
+
+define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX803-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX906-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX908-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX90A-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX942-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX10-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX11-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_cond ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i32 %res
+}
+
+define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_remote_memory(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX803-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_remote_memory(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX906-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_remote_memory(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX908-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_remote_memory(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX90A-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_remote_memory(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX942-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_remote_memory(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX10-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_remote_memory(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX11-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_remote_memory(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_cond ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.remote.memory !0
+ ret i32 %res
+}
+
+define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX803-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX906-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX908-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX90A-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX942-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX942-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX10-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 [[LOADED]]
+; GFX11-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_cond_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_cond ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i32 %res
+}
+
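The check lines above spell out, target by target, the compare-and-swap loop that AtomicExpand emits for a system-scope atomicrmw usub_cond when the operation cannot be selected directly; in these checks only GFX12 keeps the operation as a single atomicrmw, and only when the no-fine-grained/no-remote metadata is attached. As a compact restatement of the loop shape those checks verify (the function name below is illustrative, not part of the patch):

define i32 @usub_cond_i32_expansion_sketch(ptr addrspace(1) %ptr, i32 %value) {
entry:
  %start = load i32, ptr addrspace(1) %ptr, align 4
  br label %loop

loop:
  ; new = (old >= value) ? old - value : old, retried until the cmpxchg succeeds
  %old = phi i32 [ %start, %entry ], [ %loaded, %loop ]
  %uge = icmp uge i32 %old, %value
  %sub = sub i32 %old, %value
  %new = select i1 %uge, i32 %sub, i32 %old
  %pair = cmpxchg ptr addrspace(1) %ptr, i32 %old, i32 %new seq_cst seq_cst, align 4
  %success = extractvalue { i32, i1 } %pair, 1
  %loaded = extractvalue { i32, i1 } %pair, 0
  br i1 %success, label %done, label %loop

done:
  ret i32 %loaded
}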
+;---------------------------------------------------------------------
+; atomicrmw usub_sat
+;---------------------------------------------------------------------
+
+; expansion is necessary, operation not supported over PCIe
+define i32 @test_atomicrmw_usub_sat_i32_global_system(ptr addrspace(1) %ptr, i32 %value) {
+; COMMON-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %value seq_cst
+ ret i32 %res
+}
+
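The usub_sat expansion uses the same loop structure with a shorter body: instead of the icmp/sub/select sequence, the new value is computed with the llvm.usub.sat intrinsic, which clamps the result at zero rather than leaving the old value in place. A minimal sketch of the shape the COMMON checks above assert (illustrative function name):

declare i32 @llvm.usub.sat.i32(i32, i32)

define i32 @usub_sat_i32_expansion_sketch(ptr addrspace(1) %ptr, i32 %value) {
entry:
  %start = load i32, ptr addrspace(1) %ptr, align 4
  br label %loop

loop:
  ; new = usub.sat(old, value), i.e. old - value clamped at 0
  %old = phi i32 [ %start, %entry ], [ %loaded, %loop ]
  %new = call i32 @llvm.usub.sat.i32(i32 %old, i32 %value)
  %pair = cmpxchg ptr addrspace(1) %ptr, i32 %old, i32 %new seq_cst seq_cst, align 4
  %success = extractvalue { i32, i1 } %pair, 1
  %loaded = extractvalue { i32, i1 } %pair, 0
  br i1 %success, label %done, label %loop

done:
  ret i32 %loaded
}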
+define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX942-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX10-NEXT: ret i32 [[RES]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX11-NEXT: ret i32 [[RES]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i32 %res
+}
+
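As the GFX10/GFX11/GFX12 checks in the test above show, attaching the empty !amdgpu.no.fine.grained.memory (and/or !amdgpu.no.remote.memory) metadata node is what allows those targets to keep a 32-bit usub_sat as a single atomicrmw, while the older targets still expand it; for usub_cond the earlier checks keep the native form only on GFX12. A self-contained example of the opt-in form, mirroring the test inputs (the function name is illustrative):

define i32 @usub_sat_native_path_sketch(ptr addrspace(1) %ptr, i32 %value) {
  %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0
  ret i32 %res
}

!0 = !{}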
+define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_remote_memory(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_remote_memory(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_remote_memory(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_remote_memory(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_remote_memory(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX942-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_remote_memory(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX10-NEXT: ret i32 [[RES]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_remote_memory(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX11-NEXT: ret i32 [[RES]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_remote_memory(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.remote.memory !0
+ ret i32 %res
+}
+
+define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
+; GFX803-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX942-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX942: atomicrmw.start:
+; GFX942-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX942-NEXT: [[NEW:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[LOADED]], i32 [[VALUE]])
+; GFX942-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX942-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX942-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX942-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX942: atomicrmw.end:
+; GFX942-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX10-NEXT: ret i32 [[RES]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX11-NEXT: ret i32 [[RES]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_usub_sat_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; GFX12-NEXT: ret i32 [[RES]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i32 %res
+}
+
!0 = !{}
;.
; GFX803: [[META0]] = !{}
@@ -757,12 +1443,3 @@ define i32 @test_atomicrmw_udec_wrap_i32_global_system__amdgpu_no_fine_grained_m
;.
; GFX12: [[META0]] = !{}
;.
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX10: {{.*}}
-; GFX11: {{.*}}
-; GFX12: {{.*}}
-; GFX803: {{.*}}
-; GFX906: {{.*}}
-; GFX908: {{.*}}
-; GFX90A: {{.*}}
-; GFX942: {{.*}}
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-agent.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-agent.ll
index 8bc481408fe73..0a5ae0df170d1 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-agent.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-agent.ll
@@ -166,10 +166,10 @@ define i64 @test_atomicrmw_sub_i64_global_agent(ptr addrspace(1) %ptr, i64 %valu
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i64 [[NEWLOADED]]
;
-; GFX940-LABEL: define i64 @test_atomicrmw_sub_i64_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; GFX940-NEXT: ret i64 [[RES]]
+; GFX942-LABEL: define i64 @test_atomicrmw_sub_i64_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX942-NEXT: ret i64 [[RES]]
;
; GFX10-LABEL: define i64 @test_atomicrmw_sub_i64_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -204,6 +204,10 @@ define i64 @test_atomicrmw_sub_i64_global_agent(ptr addrspace(1) %ptr, i64 %valu
; GFX12-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
; GFX12-NEXT: ret i64 [[RES]]
;
+; GFX940-LABEL: define i64 @test_atomicrmw_sub_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
%res = atomicrmw sub ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
}
@@ -300,10 +304,10 @@ define i64 @test_atomicrmw_and_i64_global_agent(ptr addrspace(1) %ptr, i64 %valu
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i64 [[NEWLOADED]]
;
-; GFX940-LABEL: define i64 @test_atomicrmw_and_i64_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; GFX940-NEXT: ret i64 [[RES]]
+; GFX942-LABEL: define i64 @test_atomicrmw_and_i64_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX942-NEXT: ret i64 [[RES]]
;
; GFX10-LABEL: define i64 @test_atomicrmw_and_i64_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -338,6 +342,10 @@ define i64 @test_atomicrmw_and_i64_global_agent(ptr addrspace(1) %ptr, i64 %valu
; GFX12-NEXT: [[RES:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
; GFX12-NEXT: ret i64 [[RES]]
;
+; GFX940-LABEL: define i64 @test_atomicrmw_and_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
%res = atomicrmw and ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
}
@@ -519,10 +527,10 @@ define i64 @test_atomicrmw_or_i64_global_agent(ptr addrspace(1) %ptr, i64 %value
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i64 [[NEWLOADED]]
;
-; GFX940-LABEL: define i64 @test_atomicrmw_or_i64_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; GFX940-NEXT: ret i64 [[RES]]
+; GFX942-LABEL: define i64 @test_atomicrmw_or_i64_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX942-NEXT: ret i64 [[RES]]
;
; GFX10-LABEL: define i64 @test_atomicrmw_or_i64_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -557,6 +565,10 @@ define i64 @test_atomicrmw_or_i64_global_agent(ptr addrspace(1) %ptr, i64 %value
; GFX12-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
; GFX12-NEXT: ret i64 [[RES]]
;
+; GFX940-LABEL: define i64 @test_atomicrmw_or_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
%res = atomicrmw or ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
}
@@ -653,10 +665,10 @@ define i64 @test_atomicrmw_xor_i64_global_agent(ptr addrspace(1) %ptr, i64 %valu
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i64 [[NEWLOADED]]
;
-; GFX940-LABEL: define i64 @test_atomicrmw_xor_i64_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; GFX940-NEXT: ret i64 [[RES]]
+; GFX942-LABEL: define i64 @test_atomicrmw_xor_i64_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX942-NEXT: ret i64 [[RES]]
;
; GFX10-LABEL: define i64 @test_atomicrmw_xor_i64_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -691,6 +703,10 @@ define i64 @test_atomicrmw_xor_i64_global_agent(ptr addrspace(1) %ptr, i64 %valu
; GFX12-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
; GFX12-NEXT: ret i64 [[RES]]
;
+; GFX940-LABEL: define i64 @test_atomicrmw_xor_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
%res = atomicrmw xor ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
}
@@ -791,10 +807,10 @@ define i64 @test_atomicrmw_max_i64_global_agent(ptr addrspace(1) %ptr, i64 %valu
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i64 [[NEWLOADED]]
;
-; GFX940-LABEL: define i64 @test_atomicrmw_max_i64_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; GFX940-NEXT: ret i64 [[RES]]
+; GFX942-LABEL: define i64 @test_atomicrmw_max_i64_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX942-NEXT: ret i64 [[RES]]
;
; GFX10-LABEL: define i64 @test_atomicrmw_max_i64_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -831,6 +847,10 @@ define i64 @test_atomicrmw_max_i64_global_agent(ptr addrspace(1) %ptr, i64 %valu
; GFX12-NEXT: [[RES:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
; GFX12-NEXT: ret i64 [[RES]]
;
+; GFX940-LABEL: define i64 @test_atomicrmw_max_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
%res = atomicrmw max ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
}
@@ -931,10 +951,10 @@ define i64 @test_atomicrmw_min_i64_global_agent(ptr addrspace(1) %ptr, i64 %valu
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i64 [[NEWLOADED]]
;
-; GFX940-LABEL: define i64 @test_atomicrmw_min_i64_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; GFX940-NEXT: ret i64 [[RES]]
+; GFX942-LABEL: define i64 @test_atomicrmw_min_i64_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX942-NEXT: ret i64 [[RES]]
;
; GFX10-LABEL: define i64 @test_atomicrmw_min_i64_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -971,6 +991,10 @@ define i64 @test_atomicrmw_min_i64_global_agent(ptr addrspace(1) %ptr, i64 %valu
; GFX12-NEXT: [[RES:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
; GFX12-NEXT: ret i64 [[RES]]
;
+; GFX940-LABEL: define i64 @test_atomicrmw_min_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
%res = atomicrmw min ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
}
@@ -1071,10 +1095,10 @@ define i64 @test_atomicrmw_umax_i64_global_agent(ptr addrspace(1) %ptr, i64 %val
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i64 [[NEWLOADED]]
;
-; GFX940-LABEL: define i64 @test_atomicrmw_umax_i64_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; GFX940-NEXT: ret i64 [[RES]]
+; GFX942-LABEL: define i64 @test_atomicrmw_umax_i64_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX942-NEXT: ret i64 [[RES]]
;
; GFX10-LABEL: define i64 @test_atomicrmw_umax_i64_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -1111,6 +1135,10 @@ define i64 @test_atomicrmw_umax_i64_global_agent(ptr addrspace(1) %ptr, i64 %val
; GFX12-NEXT: [[RES:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
; GFX12-NEXT: ret i64 [[RES]]
;
+; GFX940-LABEL: define i64 @test_atomicrmw_umax_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
%res = atomicrmw umax ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
}
@@ -1211,10 +1239,10 @@ define i64 @test_atomicrmw_umin_i64_global_agent(ptr addrspace(1) %ptr, i64 %val
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i64 [[NEWLOADED]]
;
-; GFX940-LABEL: define i64 @test_atomicrmw_umin_i64_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; GFX940-NEXT: ret i64 [[RES]]
+; GFX942-LABEL: define i64 @test_atomicrmw_umin_i64_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX942-NEXT: ret i64 [[RES]]
;
; GFX10-LABEL: define i64 @test_atomicrmw_umin_i64_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -1251,6 +1279,10 @@ define i64 @test_atomicrmw_umin_i64_global_agent(ptr addrspace(1) %ptr, i64 %val
; GFX12-NEXT: [[RES:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
; GFX12-NEXT: ret i64 [[RES]]
;
+; GFX940-LABEL: define i64 @test_atomicrmw_umin_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
%res = atomicrmw umin ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
}
@@ -1355,10 +1387,10 @@ define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(ptr addrspace(1) %ptr, i64
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i64 [[NEWLOADED]]
;
-; GFX940-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; GFX940-NEXT: ret i64 [[RES]]
+; GFX942-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX942-NEXT: ret i64 [[RES]]
;
; GFX10-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -1397,6 +1429,10 @@ define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(ptr addrspace(1) %ptr, i64
; GFX12-NEXT: [[RES:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
; GFX12-NEXT: ret i64 [[RES]]
;
+; GFX940-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
%res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
}
@@ -1509,10 +1545,10 @@ define i64 @test_atomicrmw_udec_wrap_i64_global_agent(ptr addrspace(1) %ptr, i64
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret i64 [[NEWLOADED]]
;
-; GFX940-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_agent(
-; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; GFX940-NEXT: [[RES:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; GFX940-NEXT: ret i64 [[RES]]
+; GFX942-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX942-NEXT: ret i64 [[RES]]
;
; GFX10-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_agent(
; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
@@ -1555,6 +1591,10 @@ define i64 @test_atomicrmw_udec_wrap_i64_global_agent(ptr addrspace(1) %ptr, i64
; GFX12-NEXT: [[RES:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
; GFX12-NEXT: ret i64 [[RES]]
;
+; GFX940-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
%res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
}
@@ -1589,6 +1629,176 @@ define i64 @test_atomicrmw_udec_wrap_i64_global_agent__amdgpu_no_fine_grained_me
ret i64 %res
}
+;---------------------------------------------------------------------
+; atomicrmw usub_cond
+;---------------------------------------------------------------------
+
+; expansion is necessary, operation not supported over PCIe
+define i64 @test_atomicrmw_usub_cond_i64_global_agent(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_cond_i64_global_agent(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[TMP2:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[TMP3:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[TMP3]], i64 [[LOADED]]
+; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
+ ret i64 %res
+}
+
+define i64 @test_atomicrmw_usub_cond_i64_global_agent__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_cond_i64_global_agent__amdgpu_no_fine_grained_memory(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[TMP2:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[TMP3:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[TMP3]], i64 [[LOADED]]
+; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i64 %res
+}
+
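For the 64-bit agent-scope tests every configuration shares the COMMON prefix, i.e. all of the checked targets expand usub_cond here regardless of the metadata; note in the checks above that the original syncscope("agent") and the !amdgpu.* annotations are carried over onto the cmpxchg the expansion emits. A compact restatement (illustrative function name):

define i64 @usub_cond_i64_agent_expansion_sketch(ptr addrspace(1) %ptr, i64 %value) {
entry:
  %start = load i64, ptr addrspace(1) %ptr, align 8
  br label %loop

loop:
  %old = phi i64 [ %start, %entry ], [ %loaded, %loop ]
  %uge = icmp uge i64 %old, %value
  %sub = sub i64 %old, %value
  %new = select i1 %uge, i64 %sub, i64 %old
  ; the syncscope and metadata from the original atomicrmw stay on the generated cmpxchg
  %pair = cmpxchg ptr addrspace(1) %ptr, i64 %old, i64 %new syncscope("agent") seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory !0
  %success = extractvalue { i64, i1 } %pair, 1
  %loaded = extractvalue { i64, i1 } %pair, 0
  br i1 %success, label %done, label %loop

done:
  ret i64 %loaded
}

!0 = !{}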
+define i64 @test_atomicrmw_usub_cond_i64_global_agent__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_cond_i64_global_agent__amdgpu_no_remote_memory(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[TMP2:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[TMP3:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[TMP3]], i64 [[LOADED]]
+; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
+ ret i64 %res
+}
+
+define i64 @test_atomicrmw_usub_cond_i64_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_cond_i64_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[TMP2:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[TMP3:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[TMP3]], i64 [[LOADED]]
+; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i64 %res
+}
+
+;---------------------------------------------------------------------
+; atomicrmw usub_sat
+;---------------------------------------------------------------------
+
+; expansion is necessary, operation not supported over PCIe
+define i64 @test_atomicrmw_usub_sat_i64_global_agent(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_sat_i64_global_agent(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[NEW:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[LOADED]], i64 [[VALUE]])
+; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
+ ret i64 %res
+}
+
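For readers comparing the two operations, the expansions above make the semantic difference concrete: usub_cond performs the subtraction only when it would not underflow, while usub_sat always stores the clamped result; both return the value that was previously in memory. A small illustrative example (the constants and function names are mine, not from the tests):

define i64 @usub_cond_semantics_example(ptr addrspace(1) %p) {
  ; if *p holds 5: 7 > 5, so memory is left at 5; the old value 5 is returned
  ; if *p holds 9: memory becomes 2; the old value 9 is returned
  %old = atomicrmw usub_cond ptr addrspace(1) %p, i64 7 syncscope("agent") seq_cst
  ret i64 %old
}

define i64 @usub_sat_semantics_example(ptr addrspace(1) %p) {
  ; if *p holds 5: the subtraction saturates, memory becomes 0; the old value 5 is returned
  ; if *p holds 9: memory becomes 2; the old value 9 is returned
  %old = atomicrmw usub_sat ptr addrspace(1) %p, i64 7 syncscope("agent") seq_cst
  ret i64 %old
}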
+define i64 @test_atomicrmw_usub_sat_i64_global_agent__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_sat_i64_global_agent__amdgpu_no_fine_grained_memory(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[NEW:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[LOADED]], i64 [[VALUE]])
+; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i64 %res
+}
+
+define i64 @test_atomicrmw_usub_sat_i64_global_agent__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_sat_i64_global_agent__amdgpu_no_remote_memory(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[NEW:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[LOADED]], i64 [[VALUE]])
+; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
+ ret i64 %res
+}
+
+define i64 @test_atomicrmw_usub_sat_i64_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_sat_i64_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[NEW:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[LOADED]], i64 [[VALUE]])
+; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i64 %res
+}
+
!0 = !{}
;.
; GFX803: [[META0]] = !{}
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-system.ll
index c895eebcf0f8d..cd7035857a667 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-system.ll
@@ -739,6 +739,176 @@ define i64 @test_atomicrmw_udec_wrap_i64_global_system__amdgpu_no_fine_grained_m
ret i64 %res
}
+;---------------------------------------------------------------------
+; atomicrmw usub_cond
+;---------------------------------------------------------------------
+
+; expansion is necessary, operation not supported over PCIe
+define i64 @test_atomicrmw_usub_cond_i64_global_system(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_cond_i64_global_system(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[TMP2:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[TMP3:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[TMP3]], i64 [[LOADED]]
+; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i64 %value seq_cst
+ ret i64 %res
+}
+
+define i64 @test_atomicrmw_usub_cond_i64_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_cond_i64_global_system__amdgpu_no_fine_grained_memory(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[TMP2:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[TMP3:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[TMP3]], i64 [[LOADED]]
+; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i64 %res
+}
+
+define i64 @test_atomicrmw_usub_cond_i64_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_cond_i64_global_system__amdgpu_no_remote_memory(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[TMP2:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[TMP3:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[TMP3]], i64 [[LOADED]]
+; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.remote.memory !0
+ ret i64 %res
+}
+
+define i64 @test_atomicrmw_usub_cond_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_cond_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[TMP2:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[TMP3:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[TMP3]], i64 [[LOADED]]
+; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i64 %res
+}
+
+;---------------------------------------------------------------------
+; atomicrmw usub_sat
+;---------------------------------------------------------------------
+
+; expansion is necessary, operation not supported over PCIe
+define i64 @test_atomicrmw_usub_sat_i64_global_system(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_sat_i64_global_system(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[NEW:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[LOADED]], i64 [[VALUE]])
+; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i64 %value seq_cst
+ ret i64 %res
+}
+
+define i64 @test_atomicrmw_usub_sat_i64_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_sat_i64_global_system__amdgpu_no_fine_grained_memory(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[NEW:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[LOADED]], i64 [[VALUE]])
+; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i64 %res
+}
+
+define i64 @test_atomicrmw_usub_sat_i64_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_sat_i64_global_system__amdgpu_no_remote_memory(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[NEW:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[LOADED]], i64 [[VALUE]])
+; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.remote.memory !0
+ ret i64 %res
+}
+
+define i64 @test_atomicrmw_usub_sat_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
+; COMMON-LABEL: define i64 @test_atomicrmw_usub_sat_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
+; COMMON: atomicrmw.start:
+; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; COMMON-NEXT: [[NEW:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[LOADED]], i64 [[VALUE]])
+; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; COMMON: atomicrmw.end:
+; COMMON-NEXT: ret i64 [[NEWLOADED]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i64 %res
+}
+
!0 = !{}
;.
; GFX803: [[META0]] = !{}
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
index b548943a326b8..b27d25101ff03 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll
@@ -841,3 +841,371 @@ define i8 @test_atomicrmw_dec_i8_flat_system_align4(ptr %ptr, i8 %value) {
%res = atomicrmw udec_wrap ptr %ptr, i8 %value seq_cst, align 4
ret i8 %res
}
+
+define i8 @test_atomicrmw_usub_cond_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_global_system(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i8 %value seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_cond_i8_global_system_align2(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_global_system_align2(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i8 %value seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_cond_i8_global_system_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_global_system_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i8 [[TMP3]], i8 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i8 %value seq_cst, align 4
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_cond_i8_flat_system(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_flat_system(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr %ptr, i8 %value seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_cond_i8_flat_system_align2(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_flat_system_align2(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr %ptr, i8 %value seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_cond_i8_flat_system_align4(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_flat_system_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i8 [[TMP3]], i8 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_cond ptr %ptr, i8 %value seq_cst, align 4
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_global_system(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_global_system(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i8 %value seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_global_system_align2(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_global_system_align2(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i8 %value seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_global_system_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_global_system_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i8 %value seq_cst, align 4
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_flat_system(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_flat_system(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr %ptr, i8 %value seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_flat_system_align2(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_flat_system_align2(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr %ptr, i8 %value seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_flat_system_align4(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_flat_system_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_sat ptr %ptr, i8 %value seq_cst, align 4
+ ret i8 %res
+}
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
index b19a717b56938..ad3b6c412b865 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
@@ -1759,7 +1759,7 @@ define i8 @test_atomicrmw_sub_i8_global_agent__amdgpu_no_remote_memory(ptr addrs
; GCN-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
; GCN-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
; GCN-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; GCN-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0:![0-9]+]]
; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -1785,7 +1785,7 @@ define i8 @test_atomicrmw_sub_i8_global_agent__amdgpu_no_remote_memory(ptr addrs
; R600-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
; R600-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
; R600-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; R600-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0:![0-9]+]]
; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -1817,7 +1817,7 @@ define i8 @test_atomicrmw_sub_i8_global_agent__amdgpu_no_fine_grained_memory(ptr
; GCN-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
; GCN-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
; GCN-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; GCN-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -1843,7 +1843,7 @@ define i8 @test_atomicrmw_sub_i8_global_agent__amdgpu_no_fine_grained_memory(ptr
; R600-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
; R600-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
; R600-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; R600-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -1875,7 +1875,7 @@ define i8 @test_atomicrmw_sub_i8_global_agent__amdgpu_no_fine_grained_memory__am
; GCN-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
; GCN-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
; GCN-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; GCN-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]], !amdgpu.no.fine.grained.memory [[META0]]
; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -1901,7 +1901,7 @@ define i8 @test_atomicrmw_sub_i8_global_agent__amdgpu_no_fine_grained_memory__am
; R600-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
; R600-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
; R600-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
-; R600-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]], !amdgpu.no.fine.grained.memory [[META0]]
; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
@@ -1914,4 +1914,777 @@ define i8 @test_atomicrmw_sub_i8_global_agent__amdgpu_no_fine_grained_memory__am
ret i8 %res
}
+define i8 @test_atomicrmw_usub_cond_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_cond_i8_global_agent(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; GCN-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_usub_cond_i8_global_agent(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; R600-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; R600-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_cond_i8_global_agent_align2(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_cond_i8_global_agent_align2(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; GCN-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_usub_cond_i8_global_agent_align2(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; R600-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; R600-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_cond_i8_global_agent_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_global_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i8 [[TMP3]], i8 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 4
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_cond_i8_local(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_local(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(3) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(3) %ptr, i8 %value seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_cond_i8_local_align2(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_local_align2(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(3) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(3) %ptr, i8 %value seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_cond_i8_local_align4(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_local_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(3) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i8 [[TMP3]], i8 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(3) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_cond ptr addrspace(3) %ptr, i8 %value seq_cst, align 4
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_cond_i8_flat_agent(ptr %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_cond_i8_flat_agent(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; GCN-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_usub_cond_i8_flat_agent(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; R600-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; R600-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr %ptr, i8 %value syncscope("agent") seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_cond_i8_flat_agent_align2(ptr %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_cond_i8_flat_agent_align2(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; GCN-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_usub_cond_i8_flat_agent_align2(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; R600-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; R600-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 [[EXTRACTED]]
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_cond ptr %ptr, i8 %value syncscope("agent") seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_cond_i8_flat_agent_align4(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_flat_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i8 [[TMP3]], i8 [[EXTRACTED]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_cond ptr %ptr, i8 %value syncscope("agent") seq_cst, align 4
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_sat_i8_global_agent(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_usub_sat_i8_global_agent(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_global_agent_align2(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_sat_i8_global_agent_align2(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_usub_sat_i8_global_agent_align2(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_global_agent_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_global_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 4
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_local(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_local(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(3) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(3) %ptr, i8 %value seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_local_align2(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_local_align2(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(3) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(3) %ptr, i8 %value seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_local_align4(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_local_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(3) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(3) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_sat ptr addrspace(3) %ptr, i8 %value seq_cst, align 4
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_flat_agent(ptr %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_sat_i8_flat_agent(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_usub_sat_i8_flat_agent(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr %ptr, i8 %value syncscope("agent") seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_flat_agent_align2(ptr %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_sat_i8_flat_agent_align2(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_usub_sat_i8_flat_agent_align2(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw usub_sat ptr %ptr, i8 %value syncscope("agent") seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_usub_sat_i8_flat_agent_align4(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_flat_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[NEW:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[EXTRACTED]], i8 [[VALUE:%.*]])
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw usub_sat ptr %ptr, i8 %value syncscope("agent") seq_cst, align 4
+ ret i8 %res
+}
+
!0 = !{}