[Mlir-commits] [llvm] [mlir] Add cond_sub and sub_clamp operations to atomicrmw (PR #96661)
llvmlistbot at llvm.org
Wed Jul 31 08:25:44 PDT 2024
https://github.com/anjenner updated https://github.com/llvm/llvm-project/pull/96661
>From c141aad755291f21c3f8594661e4c99e6343672b Mon Sep 17 00:00:00 2001
From: Andrew Jenner <Andrew.Jenner at amd.com>
Date: Mon, 24 Jun 2024 13:05:02 -0400
Subject: [PATCH 1/2] Add cond_sub and sub_clamp operations to atomicrmw
These both perform conditional subtraction: when the difference would be
negative, cond_sub stores back the minuend (leaving memory unchanged) and
sub_clamp stores zero.
AMDGPU has instructions for these operations. Currently we expose them through
target intrinsics, but those do not carry ordering and syncscope information.
Adding the operations to atomicrmw lets us carry that information and benefit
from the regular legalization processes. Drop the
llvm.amdgcn.atomic.cond.sub/csub intrinsics and auto-upgrade them to atomicrmw.
Note that from GFX12 onwards "csub" is renamed to "sub_clamp"; the atomicrmw
operation uses the newer name.
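For illustration, the new keywords are used like any other atomicrmw operation.
A minimal sketch (not taken from the tests in this patch; the syncscope/ordering
are arbitrary examples):

  define i32 @cond_sub_example(ptr addrspace(1) %ptr, i32 %val) {
    ; Subtract %val only if the result would be non-negative; otherwise memory
    ; is left unchanged. The original value is returned either way.
    %old = atomicrmw cond_sub ptr addrspace(1) %ptr, i32 %val syncscope("agent") seq_cst
    ret i32 %old
  }

  define i32 @sub_clamp_example(ptr addrspace(1) %ptr, i32 %val) {
    ; Subtract %val, clamping a negative result to zero.
    %old = atomicrmw sub_clamp ptr addrspace(1) %ptr, i32 %val syncscope("agent") seq_cst
    ret i32 %old
  }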
---
llvm/bindings/ocaml/llvm/llvm.ml | 6 +
llvm/bindings/ocaml/llvm/llvm.mli | 6 +
llvm/docs/AMDGPUUsage.rst | 5 -
llvm/docs/GlobalISel/GenericOpcode.rst | 4 +-
llvm/docs/LangRef.rst | 4 +
llvm/docs/ReleaseNotes.rst | 6 +
llvm/include/llvm/AsmParser/LLToken.h | 2 +
llvm/include/llvm/Bitcode/LLVMBitCodes.h | 4 +-
llvm/include/llvm/CodeGen/ISDOpcodes.h | 2 +
llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 40 +-
llvm/include/llvm/IR/Instructions.h | 10 +-
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 8 -
llvm/include/llvm/Support/TargetOpcodes.def | 4 +-
llvm/include/llvm/Target/GenericOpcodes.td | 2 +
.../Target/GlobalISel/SelectionDAGCompat.td | 2 +
.../include/llvm/Target/TargetSelectionDAG.td | 4 +
llvm/lib/AsmParser/LLLexer.cpp | 2 +
llvm/lib/AsmParser/LLParser.cpp | 6 +
llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 4 +
llvm/lib/Bitcode/Writer/BitcodeWriter.cpp | 4 +
llvm/lib/CodeGen/AtomicExpandPass.cpp | 8 +-
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 6 +
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 24 +-
.../SelectionDAG/SelectionDAGBuilder.cpp | 6 +
.../SelectionDAG/SelectionDAGDumper.cpp | 4 +
llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp | 2 +
llvm/lib/IR/AutoUpgrade.cpp | 11 +-
llvm/lib/IR/Instructions.cpp | 4 +
llvm/lib/Target/AMDGPU/AMDGPUGISel.td | 2 +
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 9 +
.../AMDGPU/AMDGPUInstructionSelector.cpp | 2 +
llvm/lib/Target/AMDGPU/AMDGPUInstructions.td | 10 +-
.../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 10 +-
.../AMDGPU/AMDGPULowerBufferFatPointers.cpp | 10 +-
.../Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 4 +-
.../Target/AMDGPU/AMDGPUSearchableTables.td | 6 -
llvm/lib/Target/AMDGPU/BUFInstructions.td | 2 +-
llvm/lib/Target/AMDGPU/DSInstructions.td | 50 +-
llvm/lib/Target/AMDGPU/FLATInstructions.td | 23 +-
llvm/lib/Target/AMDGPU/R600ISelLowering.cpp | 2 +
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 22 +-
llvm/lib/Target/AMDGPU/SIInstrInfo.td | 2 +
.../LoongArch/LoongArchISelLowering.cpp | 4 +-
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 2 +
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 4 +-
llvm/lib/Target/X86/X86ISelLowering.cpp | 2 +
.../InstCombine/InstCombineAtomicRMW.cpp | 2 +
llvm/lib/Transforms/Utils/LowerAtomic.cpp | 11 +
.../AMDGPU/MIR/atomics-gmir.mir | 6 +
.../UniformityAnalysis/AMDGPU/atomics.ll | 57 -
llvm/test/Assembler/atomic.ll | 10 +
llvm/test/Bitcode/amdgcn-atomic.ll | 81 +
llvm/test/Bitcode/compatibility.ll | 28 +
.../GlobalISel/legalizer-info-validation.mir | 6 +
.../AArch64/atomicrmw-cond-sub-clamp.ll | 142 ++
.../AMDGPU/GlobalISel/atomicrmw_cond_sub.ll | 109 ++
....atomic.csub.ll => atomicrmw_sub_clamp.ll} | 107 +-
llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll | 64 +-
.../AMDGPU/cgp-addressing-modes-gfx1030.ll | 10 +-
.../AMDGPU/global-saddr-atomics.gfx1030.ll | 18 +-
.../AMDGPU/llvm.amdgcn.atomic.cond.sub.ll | 219 ---
.../AMDGPU/llvm.amdgcn.global.atomic.csub.ll | 10 +-
.../CodeGen/AMDGPU/private-memory-atomics.ll | 52 +
llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll | 2 +-
.../CodeGen/ARM/atomicrmw-cond-sub-clamp.ll | 186 +++
.../Hexagon/atomicrmw-cond-sub-clamp.ll | 355 +++++
.../LoongArch/atomicrmw-cond-sub-clamp.ll | 362 +++++
.../PowerPC/atomicrmw-cond-sub-clamp.ll | 396 +++++
.../CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll | 1412 +++++++++++++++++
.../CodeGen/SPARC/atomicrmw-cond-sub-clamp.ll | 326 ++++
.../VE/Scalar/atomicrmw-cond-sub-clamp.ll | 240 +++
.../WebAssembly/atomicrmw-cond-sub-clamp.ll | 355 +++++
.../CodeGen/X86/atomicrmw-cond-sub-clamp.ll | 153 ++
.../match-table-cxx.td | 38 +-
.../GlobalISelCombinerEmitter/match-table.td | 62 +-
llvm/test/TableGen/GlobalISelEmitter.td | 2 +-
.../AtomicExpand/AMDGPU/expand-atomic-i16.ll | 358 +++++
.../AtomicExpand/AMDGPU/expand-atomic-i8.ll | 798 ++++++++++
mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td | 6 +-
.../test/Target/LLVMIR/Import/instructions.ll | 6 +-
mlir/test/Target/LLVMIR/llvmir.mlir | 6 +-
81 files changed, 5832 insertions(+), 519 deletions(-)
create mode 100644 llvm/test/CodeGen/AArch64/atomicrmw-cond-sub-clamp.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_cond_sub.ll
rename llvm/test/CodeGen/AMDGPU/GlobalISel/{llvm.amdgcn.global.atomic.csub.ll => atomicrmw_sub_clamp.ll} (66%)
delete mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.cond.sub.ll
create mode 100644 llvm/test/CodeGen/ARM/atomicrmw-cond-sub-clamp.ll
create mode 100644 llvm/test/CodeGen/Hexagon/atomicrmw-cond-sub-clamp.ll
create mode 100644 llvm/test/CodeGen/LoongArch/atomicrmw-cond-sub-clamp.ll
create mode 100644 llvm/test/CodeGen/PowerPC/atomicrmw-cond-sub-clamp.ll
create mode 100644 llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll
create mode 100644 llvm/test/CodeGen/SPARC/atomicrmw-cond-sub-clamp.ll
create mode 100644 llvm/test/CodeGen/VE/Scalar/atomicrmw-cond-sub-clamp.ll
create mode 100644 llvm/test/CodeGen/WebAssembly/atomicrmw-cond-sub-clamp.ll
create mode 100644 llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll
diff --git a/llvm/bindings/ocaml/llvm/llvm.ml b/llvm/bindings/ocaml/llvm/llvm.ml
index 86b010e0ac22d..ae42b1eea93d6 100644
--- a/llvm/bindings/ocaml/llvm/llvm.ml
+++ b/llvm/bindings/ocaml/llvm/llvm.ml
@@ -296,6 +296,12 @@ module AtomicRMWBinOp = struct
| UMin
| FAdd
| FSub
+ | FMax
+ | FMin
+ | UInc_Wrap
+ | UDec_Wrap
+ | Cond_Sub
+ | Sub_Clamp
end
module ValueKind = struct
diff --git a/llvm/bindings/ocaml/llvm/llvm.mli b/llvm/bindings/ocaml/llvm/llvm.mli
index c16530d3a70cb..9a6ed2ae80043 100644
--- a/llvm/bindings/ocaml/llvm/llvm.mli
+++ b/llvm/bindings/ocaml/llvm/llvm.mli
@@ -331,6 +331,12 @@ module AtomicRMWBinOp : sig
| UMin
| FAdd
| FSub
+ | FMax
+ | FMin
+ | UInc_Wrap
+ | UDec_Wrap
+ | Cond_Sub
+ | Sub_Clamp
end
(** The kind of an [llvalue], the result of [classify_value v].
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 5a16457412d24..cf0ad1b578759 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -1321,11 +1321,6 @@ The AMDGPU backend implements the following LLVM IR intrinsics.
The iglp_opt strategy implementations are subject to change.
- llvm.amdgcn.atomic.cond.sub.u32 Provides direct access to flat_atomic_cond_sub_u32, global_atomic_cond_sub_u32
- and ds_cond_sub_u32 based on address space on gfx12 targets. This
- performs subtraction only if the memory value is greater than or
- equal to the data value.
-
llvm.amdgcn.s.getpc Provides access to the s_getpc_b64 instruction, but with the return value
sign-extended from the width of the underlying PC hardware register even on
processors where the s_getpc_b64 instruction returns a zero-extended value.
diff --git a/llvm/docs/GlobalISel/GenericOpcode.rst b/llvm/docs/GlobalISel/GenericOpcode.rst
index 42f56348885b4..67bd134174644 100644
--- a/llvm/docs/GlobalISel/GenericOpcode.rst
+++ b/llvm/docs/GlobalISel/GenericOpcode.rst
@@ -825,7 +825,9 @@ operands.
G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
G_ATOMICRMW_UMIN, G_ATOMICRMW_FADD,
G_ATOMICRMW_FSUB, G_ATOMICRMW_FMAX,
- G_ATOMICRMW_FMIN
+ G_ATOMICRMW_FMIN, G_ATOMICRMW_UINC_WRAP,
+ G_ATOMICRMW_UDEC_WRAP, G_ATOMICRMW_COND_SUB,
+ G_ATOMICRMW_SUB_CLAMP
Generic atomicrmw. Expects a MachineMemOperand in addition to explicit
operands.
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index edb362c617565..ed76bd454002a 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -11209,6 +11209,8 @@ operation. The operation must be one of the following keywords:
- fmin
- uinc_wrap
- udec_wrap
+- cond_sub
+- sub_clamp
For most of these operations, the type of '<value>' must be an integer
type whose bit width is a power of two greater than or equal to eight
@@ -11259,6 +11261,8 @@ operation argument:
- fmin: ``*ptr = minnum(*ptr, val)`` (match the `llvm.minnum.*`` intrinsic)
- uinc_wrap: ``*ptr = (*ptr u>= val) ? 0 : (*ptr + 1)`` (increment value with wraparound to zero when incremented above input value)
- udec_wrap: ``*ptr = ((*ptr == 0) || (*ptr u> val)) ? val : (*ptr - 1)`` (decrement with wraparound to input value when decremented below zero).
+- cond_sub: ``*ptr = (*ptr u>= val) ? *ptr - val : *ptr`` (subtract only if the result would be non-negative, otherwise leave memory unchanged).
+- sub_clamp: ``*ptr = (*ptr u>= val) ? *ptr - val : 0`` (subtract with clamping of negative results to zero).
Example:
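To make the difference concrete (illustration only, not part of the LangRef
text): if ``*ptr`` holds 5 and ``val`` is 7, the difference would be negative,
so cond_sub leaves 5 in memory while sub_clamp stores 0; if ``*ptr`` holds 9
and ``val`` is 7, both store 2. In every case the instruction returns the
original value (5 or 9).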
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index 76356dd76f1d2..a6162127b3b74 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -80,6 +80,8 @@ Changes to the LLVM IR
removed. The next argument has been changed from byte index to bit
index.
+* Added ``cond_sub`` and ``sub_clamp`` operations to ``atomicrmw``.
+
Changes to LLVM infrastructure
------------------------------
@@ -132,6 +134,10 @@ Changes to the AMDGPU Backend
* Implemented :ref:`llvm.get.rounding <int_get_rounding>` and :ref:`llvm.set.rounding <int_set_rounding>`
+* Removed ``llvm.amdgcn.atomic.cond.sub.u32`` and
+ ``llvm.amdgcn.atomic.csub.u32`` intrinsics. :ref:`atomicrmw <i_atomicrmw>`
+ should be used instead with ``cond_sub`` and ``sub_clamp``.
+
Changes to the ARM Backend
--------------------------
diff --git a/llvm/include/llvm/AsmParser/LLToken.h b/llvm/include/llvm/AsmParser/LLToken.h
index db6780b70ca5a..8ee04f25095f2 100644
--- a/llvm/include/llvm/AsmParser/LLToken.h
+++ b/llvm/include/llvm/AsmParser/LLToken.h
@@ -268,6 +268,8 @@ enum Kind {
kw_fmin,
kw_uinc_wrap,
kw_udec_wrap,
+ kw_cond_sub,
+ kw_sub_clamp,
// Instruction Opcodes (Opcode in UIntVal).
kw_fneg,
diff --git a/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index 5b5e08b5cbc3f..20980695499e6 100644
--- a/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -484,7 +484,9 @@ enum RMWOperations {
RMW_FMAX = 13,
RMW_FMIN = 14,
RMW_UINC_WRAP = 15,
- RMW_UDEC_WRAP = 16
+ RMW_UDEC_WRAP = 16,
+ RMW_COND_SUB = 17,
+ RMW_SUB_CLAMP = 18
};
/// OverflowingBinaryOperatorOptionalFlags - Flags for serializing
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 6bb89fb58a296..21ac93a3b4b9b 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1292,6 +1292,8 @@ enum NodeType {
ATOMIC_LOAD_FMIN,
ATOMIC_LOAD_UINC_WRAP,
ATOMIC_LOAD_UDEC_WRAP,
+ ATOMIC_LOAD_COND_SUB,
+ ATOMIC_LOAD_SUB_CLAMP,
// Masked load and store - consecutive vector load and store operations
// with additional mask operand that prevents memory accesses to the
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 2f36c2e86b1c3..1f3dd4ac1eda6 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1470,6 +1470,8 @@ class MemSDNode : public SDNode {
case ISD::ATOMIC_LOAD_FMIN:
case ISD::ATOMIC_LOAD_UINC_WRAP:
case ISD::ATOMIC_LOAD_UDEC_WRAP:
+ case ISD::ATOMIC_LOAD_COND_SUB:
+ case ISD::ATOMIC_LOAD_SUB_CLAMP:
case ISD::ATOMIC_LOAD:
case ISD::ATOMIC_STORE:
case ISD::MLOAD:
@@ -1536,27 +1538,29 @@ class AtomicSDNode : public MemSDNode {
// Methods to support isa and dyn_cast
static bool classof(const SDNode *N) {
- return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
+ return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
- N->getOpcode() == ISD::ATOMIC_SWAP ||
- N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
- N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
- N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
- N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
- N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
- N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
- N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
- N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
- N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
- N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
- N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
- N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
- N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
- N->getOpcode() == ISD::ATOMIC_LOAD_FMAX ||
- N->getOpcode() == ISD::ATOMIC_LOAD_FMIN ||
+ N->getOpcode() == ISD::ATOMIC_SWAP ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_FMAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_FMIN ||
N->getOpcode() == ISD::ATOMIC_LOAD_UINC_WRAP ||
N->getOpcode() == ISD::ATOMIC_LOAD_UDEC_WRAP ||
- N->getOpcode() == ISD::ATOMIC_LOAD ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_COND_SUB ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB_CLAMP ||
+ N->getOpcode() == ISD::ATOMIC_LOAD ||
N->getOpcode() == ISD::ATOMIC_STORE;
}
};
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index ab58edd1bf78c..ab5c20758abde 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -750,8 +750,16 @@ class AtomicRMWInst : public Instruction {
/// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
UDecWrap,
+  /// Subtract only if the result would be non-negative.
+ /// *p = (old u>= v) ? old - v : old
+ CondSub,
+
+ /// Subtract with clamping of negative results to zero.
+ /// *p = (old u>= v) ? old - v : 0
+ SubClamp,
+
FIRST_BINOP = Xchg,
- LAST_BINOP = UDecWrap,
+ LAST_BINOP = SubClamp,
BAD_BINOP
};
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 7a5e919fe26e3..5a3c80ebaa6e2 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1284,7 +1284,6 @@ def int_amdgcn_raw_buffer_atomic_or : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_xor : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_inc : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_dec : AMDGPURawBufferAtomic;
-def int_amdgcn_raw_buffer_atomic_cond_sub_u32 : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_cmpswap : Intrinsic<
[llvm_anyint_ty],
[LLVMMatchType<0>, // src(VGPR)
@@ -1321,7 +1320,6 @@ def int_amdgcn_raw_ptr_buffer_atomic_or : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_xor : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_inc : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_dec : AMDGPURawPtrBufferAtomic;
-def int_amdgcn_raw_ptr_buffer_atomic_cond_sub_u32 : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_cmpswap : Intrinsic<
[llvm_anyint_ty],
[LLVMMatchType<0>, // src(VGPR)
@@ -1362,7 +1360,6 @@ def int_amdgcn_struct_buffer_atomic_or : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_xor : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_inc : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_dec : AMDGPUStructBufferAtomic;
-def int_amdgcn_struct_buffer_atomic_cond_sub_u32 : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_cmpswap : Intrinsic<
[llvm_anyint_ty],
[LLVMMatchType<0>, // src(VGPR)
@@ -1398,7 +1395,6 @@ def int_amdgcn_struct_ptr_buffer_atomic_or : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_xor : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_inc : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_dec : AMDGPUStructPtrBufferAtomic;
-def int_amdgcn_struct_ptr_buffer_atomic_cond_sub_u32 : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_cmpswap : Intrinsic<
[llvm_anyint_ty],
[LLVMMatchType<0>, // src(VGPR)
@@ -2392,8 +2388,6 @@ class AMDGPUAtomicRtn<LLVMType vt, LLVMType pt = llvm_anyptr_ty> : Intrinsic <
[IntrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>, IntrNoCallback, IntrNoFree], "",
[SDNPMemOperand]>;
-def int_amdgcn_global_atomic_csub : AMDGPUAtomicRtn<llvm_i32_ty>;
-
// uint4 llvm.amdgcn.image.bvh.intersect.ray <node_ptr>, <ray_extent>, <ray_origin>,
// <ray_dir>, <ray_inv_dir>, <texture_descr>
// <node_ptr> is i32 or i64.
@@ -2594,8 +2588,6 @@ def int_amdgcn_flat_atomic_fmax_num : AMDGPUAtomicRtn<llvm_anyfloat_ty>;
def int_amdgcn_global_atomic_fmin_num : AMDGPUAtomicRtn<llvm_anyfloat_ty>;
def int_amdgcn_global_atomic_fmax_num : AMDGPUAtomicRtn<llvm_anyfloat_ty>;
-def int_amdgcn_atomic_cond_sub_u32 : AMDGPUAtomicRtn<llvm_i32_ty>;
-
class AMDGPULoadIntrinsic<LLVMType ptr_ty>:
Intrinsic<
[llvm_any_ty],
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index df4b264af72a8..75bc350fa8d6b 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -411,12 +411,14 @@ HANDLE_TARGET_OPCODE(G_ATOMICRMW_FMAX)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_FMIN)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_UINC_WRAP)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_UDEC_WRAP)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_COND_SUB)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_SUB_CLAMP)
// Marker for start of Generic AtomicRMW opcodes
HANDLE_TARGET_OPCODE_MARKER(GENERIC_ATOMICRMW_OP_START, G_ATOMICRMW_XCHG)
// Marker for end of Generic AtomicRMW opcodes
-HANDLE_TARGET_OPCODE_MARKER(GENERIC_ATOMICRMW_OP_END, G_ATOMICRMW_UDEC_WRAP)
+HANDLE_TARGET_OPCODE_MARKER(GENERIC_ATOMICRMW_OP_END, G_ATOMICRMW_SUB_CLAMP)
// Generic atomic fence
HANDLE_TARGET_OPCODE(G_FENCE)
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 4abffe6476c85..1691e83eae377 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -1291,6 +1291,8 @@ def G_ATOMICRMW_FMAX : G_ATOMICRMW_OP;
def G_ATOMICRMW_FMIN : G_ATOMICRMW_OP;
def G_ATOMICRMW_UINC_WRAP : G_ATOMICRMW_OP;
def G_ATOMICRMW_UDEC_WRAP : G_ATOMICRMW_OP;
+def G_ATOMICRMW_COND_SUB : G_ATOMICRMW_OP;
+def G_ATOMICRMW_SUB_CLAMP : G_ATOMICRMW_OP;
def G_FENCE : GenericInstruction {
let OutOperandList = (outs);
diff --git a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index 560d3b434d07d..43d4e8d37e9b0 100644
--- a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -252,6 +252,8 @@ def : GINodeEquiv<G_ATOMICRMW_FMAX, atomic_load_fmax>;
def : GINodeEquiv<G_ATOMICRMW_FMIN, atomic_load_fmin>;
def : GINodeEquiv<G_ATOMICRMW_UINC_WRAP, atomic_load_uinc_wrap>;
def : GINodeEquiv<G_ATOMICRMW_UDEC_WRAP, atomic_load_udec_wrap>;
+def : GINodeEquiv<G_ATOMICRMW_COND_SUB, atomic_load_cond_sub>;
+def : GINodeEquiv<G_ATOMICRMW_SUB_CLAMP, atomic_load_sub_clamp>;
def : GINodeEquiv<G_FENCE, atomic_fence>;
def : GINodeEquiv<G_PREFETCH, prefetch>;
def : GINodeEquiv<G_TRAP, trap>;
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 8cbf98cd58ca9..ac6cfd823eb44 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -722,6 +722,10 @@ def atomic_load_uinc_wrap : SDNode<"ISD::ATOMIC_LOAD_UINC_WRAP", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_udec_wrap : SDNode<"ISD::ATOMIC_LOAD_UDEC_WRAP", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_cond_sub : SDNode<"ISD::ATOMIC_LOAD_COND_SUB", SDTAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_sub_clamp : SDNode<"ISD::ATOMIC_LOAD_SUB_CLAMP", SDTAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
diff --git a/llvm/lib/AsmParser/LLLexer.cpp b/llvm/lib/AsmParser/LLLexer.cpp
index 7d7fe19568e8a..cbd039bf98c44 100644
--- a/llvm/lib/AsmParser/LLLexer.cpp
+++ b/llvm/lib/AsmParser/LLLexer.cpp
@@ -704,6 +704,8 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(umin); KEYWORD(fmax); KEYWORD(fmin);
KEYWORD(uinc_wrap);
KEYWORD(udec_wrap);
+ KEYWORD(cond_sub);
+ KEYWORD(sub_clamp);
KEYWORD(splat);
KEYWORD(vscale);
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 21d386097fc63..2d086c859c14c 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -8331,6 +8331,12 @@ int LLParser::parseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
case lltok::kw_udec_wrap:
Operation = AtomicRMWInst::UDecWrap;
break;
+ case lltok::kw_cond_sub:
+ Operation = AtomicRMWInst::CondSub;
+ break;
+ case lltok::kw_sub_clamp:
+ Operation = AtomicRMWInst::SubClamp;
+ break;
case lltok::kw_fadd:
Operation = AtomicRMWInst::FAdd;
IsFP = true;
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 05c9697123371..eea10e9221b53 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -1349,6 +1349,10 @@ static AtomicRMWInst::BinOp getDecodedRMWOperation(unsigned Val) {
return AtomicRMWInst::UIncWrap;
case bitc::RMW_UDEC_WRAP:
return AtomicRMWInst::UDecWrap;
+ case bitc::RMW_COND_SUB:
+ return AtomicRMWInst::CondSub;
+ case bitc::RMW_SUB_CLAMP:
+ return AtomicRMWInst::SubClamp;
}
}
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index ba16c0851e1fd..12002803ca54e 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -658,6 +658,10 @@ static unsigned getEncodedRMWOperation(AtomicRMWInst::BinOp Op) {
return bitc::RMW_UINC_WRAP;
case AtomicRMWInst::UDecWrap:
return bitc::RMW_UDEC_WRAP;
+ case AtomicRMWInst::CondSub:
+ return bitc::RMW_COND_SUB;
+ case AtomicRMWInst::SubClamp:
+ return bitc::RMW_SUB_CLAMP;
}
}
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index 7728cc50fc9f9..c54875e4bc0a2 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -868,7 +868,9 @@ static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
case AtomicRMWInst::FMin:
case AtomicRMWInst::FMax:
case AtomicRMWInst::UIncWrap:
- case AtomicRMWInst::UDecWrap: {
+ case AtomicRMWInst::UDecWrap:
+ case AtomicRMWInst::CondSub:
+ case AtomicRMWInst::SubClamp: {
// Finally, other ops will operate on the full value, so truncate down to
// the original size, and expand out again after doing the
// operation. Bitcasts will be inserted for FP values.
@@ -1542,6 +1544,8 @@ bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
case AtomicRMWInst::Sub:
case AtomicRMWInst::Or:
case AtomicRMWInst::Xor:
+ case AtomicRMWInst::CondSub:
+ case AtomicRMWInst::SubClamp:
return C->isZero();
case AtomicRMWInst::And:
return C->isMinusOne();
@@ -1783,6 +1787,8 @@ static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
case AtomicRMWInst::FSub:
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
+ case AtomicRMWInst::CondSub:
+ case AtomicRMWInst::SubClamp:
// No atomic libcalls are available for max/min/umax/umin.
return {};
}
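A note on the isIdempotentRMW change above: the new operations join add, sub,
or and xor in being idempotent for a constant zero operand, since ``old u>= 0``
always holds and ``old - 0 == old`` is stored back. A sketch (illustration
only):

  define i32 @idempotent_cond_sub(ptr %p) {
    ; Never modifies memory; only the load/ordering effects remain, so the
    ; usual idempotent-RMW simplifications apply.
    %old = atomicrmw cond_sub ptr %p, i32 0 seq_cst
    ret i32 %old
  }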
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index c06b35a98e434..452d8a1599636 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -3289,6 +3289,12 @@ bool IRTranslator::translateAtomicRMW(const User &U,
case AtomicRMWInst::UDecWrap:
Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
break;
+ case AtomicRMWInst::CondSub:
+ Opcode = TargetOpcode::G_ATOMICRMW_COND_SUB;
+ break;
+ case AtomicRMWInst::SubClamp:
+ Opcode = TargetOpcode::G_ATOMICRMW_SUB_CLAMP;
+ break;
}
MIRBuilder.buildAtomicRMW(
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 8463e94d7f933..41c5eca68b170 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -8570,24 +8570,18 @@ SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
SDValue Chain, SDValue Ptr, SDValue Val,
MachineMemOperand *MMO) {
- assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
- Opcode == ISD::ATOMIC_LOAD_SUB ||
- Opcode == ISD::ATOMIC_LOAD_AND ||
- Opcode == ISD::ATOMIC_LOAD_CLR ||
- Opcode == ISD::ATOMIC_LOAD_OR ||
- Opcode == ISD::ATOMIC_LOAD_XOR ||
- Opcode == ISD::ATOMIC_LOAD_NAND ||
- Opcode == ISD::ATOMIC_LOAD_MIN ||
- Opcode == ISD::ATOMIC_LOAD_MAX ||
- Opcode == ISD::ATOMIC_LOAD_UMIN ||
- Opcode == ISD::ATOMIC_LOAD_UMAX ||
- Opcode == ISD::ATOMIC_LOAD_FADD ||
- Opcode == ISD::ATOMIC_LOAD_FSUB ||
- Opcode == ISD::ATOMIC_LOAD_FMAX ||
+ assert((Opcode == ISD::ATOMIC_LOAD_ADD || Opcode == ISD::ATOMIC_LOAD_SUB ||
+ Opcode == ISD::ATOMIC_LOAD_AND || Opcode == ISD::ATOMIC_LOAD_CLR ||
+ Opcode == ISD::ATOMIC_LOAD_OR || Opcode == ISD::ATOMIC_LOAD_XOR ||
+ Opcode == ISD::ATOMIC_LOAD_NAND || Opcode == ISD::ATOMIC_LOAD_MIN ||
+ Opcode == ISD::ATOMIC_LOAD_MAX || Opcode == ISD::ATOMIC_LOAD_UMIN ||
+ Opcode == ISD::ATOMIC_LOAD_UMAX || Opcode == ISD::ATOMIC_LOAD_FADD ||
+ Opcode == ISD::ATOMIC_LOAD_FSUB || Opcode == ISD::ATOMIC_LOAD_FMAX ||
Opcode == ISD::ATOMIC_LOAD_FMIN ||
Opcode == ISD::ATOMIC_LOAD_UINC_WRAP ||
Opcode == ISD::ATOMIC_LOAD_UDEC_WRAP ||
- Opcode == ISD::ATOMIC_SWAP ||
+ Opcode == ISD::ATOMIC_LOAD_COND_SUB ||
+ Opcode == ISD::ATOMIC_LOAD_SUB_CLAMP || Opcode == ISD::ATOMIC_SWAP ||
Opcode == ISD::ATOMIC_STORE) &&
"Invalid Atomic Op");
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 296b06187ec0f..5bf9da7715c68 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -5075,6 +5075,12 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
case AtomicRMWInst::UDecWrap:
NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
break;
+ case AtomicRMWInst::CondSub:
+ NT = ISD::ATOMIC_LOAD_COND_SUB;
+ break;
+ case AtomicRMWInst::SubClamp:
+ NT = ISD::ATOMIC_LOAD_SUB_CLAMP;
+ break;
}
AtomicOrdering Ordering = I.getOrdering();
SyncScope::ID SSID = I.getSyncScopeID();
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index a7555d6d31f26..1218d272e75a1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -102,6 +102,10 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
return "AtomicLoadUIncWrap";
case ISD::ATOMIC_LOAD_UDEC_WRAP:
return "AtomicLoadUDecWrap";
+ case ISD::ATOMIC_LOAD_COND_SUB:
+ return "AtomicLoadCondSub";
+ case ISD::ATOMIC_LOAD_SUB_CLAMP:
+ return "AtomicLoadSubClamp";
case ISD::ATOMIC_LOAD: return "AtomicLoad";
case ISD::ATOMIC_STORE: return "AtomicStore";
case ISD::PCMARKER: return "PCMarker";
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index dbf7154229d38..e195df03ae7fa 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -5998,6 +5998,8 @@ Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2,
case AtomicRMWInst::FMin:
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
+ case AtomicRMWInst::CondSub:
+ case AtomicRMWInst::SubClamp:
llvm_unreachable("Unsupported atomic update operation");
}
llvm_unreachable("Unsupported atomic update operation");
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index d7825d9b3e3e5..2306d2d61efb6 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1024,9 +1024,10 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
}
if (Name.consume_front("atomic.")) {
- if (Name.starts_with("inc") || Name.starts_with("dec")) {
- // These were replaced with atomicrmw uinc_wrap and udec_wrap, so
- // there's no new declaration.
+ if (Name.starts_with("inc") || Name.starts_with("dec") ||
+ Name.starts_with("cond.sub") || Name.starts_with("csub")) {
+ // These were replaced with atomicrmw uinc_wrap, udec_wrap, cond_sub
+    // and sub_clamp, so there's no new declaration.
NewFn = nullptr;
return true;
}
@@ -2348,7 +2349,9 @@ static Value *upgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI,
StringSwitch<AtomicRMWInst::BinOp>(Name)
.StartsWith("ds.fadd", AtomicRMWInst::FAdd)
.StartsWith("atomic.inc.", AtomicRMWInst::UIncWrap)
- .StartsWith("atomic.dec.", AtomicRMWInst::UDecWrap);
+ .StartsWith("atomic.dec.", AtomicRMWInst::UDecWrap)
+ .StartsWith("atomic.cond.sub", AtomicRMWInst::CondSub)
+ .StartsWith("atomic.csub", AtomicRMWInst::SubClamp);
unsigned NumOperands = CI->getNumOperands();
if (NumOperands < 3) // Malformed bitcode.
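For reference, the auto-upgrade rewrites calls to the removed intrinsics into
the new atomicrmw forms roughly as follows. A sketch only; the exact ordering
and syncscope emitted are chosen by AutoUpgrade and are checked in
llvm/test/Bitcode/amdgcn-atomic.ll, the seq_cst/"agent" below is illustrative:

  declare i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1), i32)

  ; Old bitcode (before upgrade):
  define i32 @old(ptr addrspace(1) %ptr, i32 %val) {
    %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %ptr, i32 %val)
    ret i32 %ret
  }

  ; After auto-upgrade:
  define i32 @new(ptr addrspace(1) %ptr, i32 %val) {
    %ret = atomicrmw sub_clamp ptr addrspace(1) %ptr, i32 %val syncscope("agent") seq_cst
    ret i32 %ret
  }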
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 445323f2a085b..7917ee14f30ab 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1422,6 +1422,10 @@ StringRef AtomicRMWInst::getOperationName(BinOp Op) {
return "uinc_wrap";
case AtomicRMWInst::UDecWrap:
return "udec_wrap";
+ case AtomicRMWInst::CondSub:
+ return "cond_sub";
+ case AtomicRMWInst::SubClamp:
+ return "sub_clamp";
case AtomicRMWInst::BAD_BINOP:
return "<invalid operation>";
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index 537d3a43aa9fa..3a197a9655122 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -271,6 +271,8 @@ def : GINodeEquiv<G_AMDGPU_TBUFFER_STORE_FORMAT_D16, SItbuffer_store_d16>;
// FIXME: Check MMO is atomic
def : GINodeEquiv<G_ATOMICRMW_UINC_WRAP, atomic_load_uinc_wrap_glue>;
def : GINodeEquiv<G_ATOMICRMW_UDEC_WRAP, atomic_load_udec_wrap_glue>;
+def : GINodeEquiv<G_ATOMICRMW_COND_SUB, atomic_load_cond_sub_glue>;
+def : GINodeEquiv<G_ATOMICRMW_SUB_CLAMP, atomic_load_sub_clamp_glue>;
def : GINodeEquiv<G_ATOMICRMW_FMIN, atomic_load_fmin_glue>;
def : GINodeEquiv<G_ATOMICRMW_FMAX, atomic_load_fmax_glue>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 522b3a34161cd..327b3d9054fa5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -6013,6 +6013,15 @@ AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
case AtomicRMWInst::FSub:
case AtomicRMWInst::FMax:
case AtomicRMWInst::FMin:
+ return AtomicExpansionKind::CmpXChg;
+ case AtomicRMWInst::CondSub:
+ case AtomicRMWInst::SubClamp:
+ if (auto *IntTy = dyn_cast<IntegerType>(RMW->getType())) {
+ unsigned Size = IntTy->getBitWidth();
+ if (Size == 32)
+ return AtomicExpansionKind::None;
+ }
+
return AtomicExpansionKind::CmpXChg;
case AtomicRMWInst::Xchg: {
const DataLayout &DL = RMW->getFunction()->getParent()->getDataLayout();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 03e2d622dd319..ea10c0a5ba1fc 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -3619,6 +3619,8 @@ bool AMDGPUInstructionSelector::select(MachineInstr &I) {
case TargetOpcode::G_ATOMICRMW_UMAX:
case TargetOpcode::G_ATOMICRMW_UINC_WRAP:
case TargetOpcode::G_ATOMICRMW_UDEC_WRAP:
+ case TargetOpcode::G_ATOMICRMW_COND_SUB:
+ case TargetOpcode::G_ATOMICRMW_SUB_CLAMP:
case TargetOpcode::G_ATOMICRMW_FADD:
case TargetOpcode::G_ATOMICRMW_FMIN:
case TargetOpcode::G_ATOMICRMW_FMAX:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index c6dbc58395e48..22c23c89febd7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -628,16 +628,16 @@ defm int_amdgcn_flat_atomic_fadd : global_addr_space_atomic_op;
defm int_amdgcn_global_atomic_fadd_v2bf16 : noret_op;
defm int_amdgcn_global_atomic_fmin : noret_op;
defm int_amdgcn_global_atomic_fmax : noret_op;
-defm int_amdgcn_global_atomic_csub : noret_op;
+//defm int_amdgcn_global_atomic_csub : noret_op;
defm int_amdgcn_flat_atomic_fadd : local_addr_space_atomic_op;
defm int_amdgcn_global_atomic_ordered_add_b64 : noret_op;
defm int_amdgcn_flat_atomic_fmin_num : noret_op;
defm int_amdgcn_flat_atomic_fmax_num : noret_op;
defm int_amdgcn_global_atomic_fmin_num : noret_op;
defm int_amdgcn_global_atomic_fmax_num : noret_op;
-defm int_amdgcn_atomic_cond_sub_u32 : local_addr_space_atomic_op;
-defm int_amdgcn_atomic_cond_sub_u32 : flat_addr_space_atomic_op;
-defm int_amdgcn_atomic_cond_sub_u32 : global_addr_space_atomic_op;
+//defm int_amdgcn_atomic_cond_sub_u32 : local_addr_space_atomic_op;
+//defm int_amdgcn_atomic_cond_sub_u32 : flat_addr_space_atomic_op;
+//defm int_amdgcn_atomic_cond_sub_u32 : global_addr_space_atomic_op;
multiclass noret_binary_atomic_op<SDNode atomic_op> {
let HasNoUse = true in
@@ -688,6 +688,8 @@ defm atomic_load_fmin : binary_atomic_op_fp_all_as<atomic_load_fmin>;
defm atomic_load_fmax : binary_atomic_op_fp_all_as<atomic_load_fmax>;
defm atomic_load_uinc_wrap : binary_atomic_op_all_as<atomic_load_uinc_wrap>;
defm atomic_load_udec_wrap : binary_atomic_op_all_as<atomic_load_udec_wrap>;
+defm atomic_load_cond_sub : binary_atomic_op_all_as<atomic_load_cond_sub>;
+defm atomic_load_sub_clamp : binary_atomic_op_all_as<atomic_load_sub_clamp>;
defm AMDGPUatomic_cmp_swap : binary_atomic_op_all_as<AMDGPUatomic_cmp_swap>;
def load_align8_local : PatFrag<(ops node:$ptr), (load_local node:$ptr)>,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index e7251a24b29fa..d6c07ad83d5b7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1637,6 +1637,13 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
}
+ auto &Atomics32 =
+ getActionDefinitionsBuilder({G_ATOMICRMW_COND_SUB, G_ATOMICRMW_SUB_CLAMP})
+ .legalFor({{S32, GlobalPtr}, {S32, LocalPtr}, {S32, RegionPtr}});
+ if (ST.hasFlatAddressSpace()) {
+ Atomics32.legalFor({{S32, FlatPtr}});
+ }
+
auto &Atomic = getActionDefinitionsBuilder(G_ATOMICRMW_FADD);
if (ST.hasLDSFPAtomicAddF32()) {
Atomic.legalFor({{S32, LocalPtr}, {S32, RegionPtr}});
@@ -6031,9 +6038,6 @@ static unsigned getBufferAtomicPseudo(Intrinsic::ID IntrID) {
case Intrinsic::amdgcn_struct_buffer_atomic_fmax:
case Intrinsic::amdgcn_struct_ptr_buffer_atomic_fmax:
return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX;
- case Intrinsic::amdgcn_raw_buffer_atomic_cond_sub_u32:
- case Intrinsic::amdgcn_struct_buffer_atomic_cond_sub_u32:
- return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_COND_SUB_U32;
default:
llvm_unreachable("unhandled atomic opcode");
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
index a8f6ad09fe28c..0ff32bad4cc06 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
@@ -1149,7 +1149,15 @@ Value *SplitPtrStructs::handleMemoryInst(Instruction *I, Value *Arg, Value *Ptr,
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
report_fatal_error("wrapping increment/decrement not supported for "
- "buffer resources and should've ben expanded away");
+ "buffer resources and should've been expanded away");
+ break;
+ case AtomicRMWInst::CondSub:
+ report_fatal_error("conditional subtract not supported for buffer "
+ "resources and should've been expanded away");
+ break;
+ case AtomicRMWInst::SubClamp:
+ report_fatal_error("subtract with clamp not supported for buffer "
+ "resources and should've been expanded away");
break;
case AtomicRMWInst::BAD_BINOP:
llvm_unreachable("Not sure how we got a bad binop");
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 9e7694f41d6b8..ce753f235ee60 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4887,7 +4887,6 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
break;
}
case Intrinsic::amdgcn_global_atomic_fadd:
- case Intrinsic::amdgcn_global_atomic_csub:
case Intrinsic::amdgcn_global_atomic_fmin:
case Intrinsic::amdgcn_global_atomic_fmax:
case Intrinsic::amdgcn_global_atomic_fmin_num:
@@ -4899,7 +4898,6 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_flat_atomic_fmax_num:
case Intrinsic::amdgcn_global_atomic_fadd_v2bf16:
case Intrinsic::amdgcn_flat_atomic_fadd_v2bf16:
- case Intrinsic::amdgcn_atomic_cond_sub_u32:
case Intrinsic::amdgcn_global_atomic_ordered_add_b64:
case Intrinsic::amdgcn_global_load_tr_b64:
case Intrinsic::amdgcn_global_load_tr_b128:
@@ -5222,6 +5220,8 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_ATOMICRMW_FMAX:
case AMDGPU::G_ATOMICRMW_UINC_WRAP:
case AMDGPU::G_ATOMICRMW_UDEC_WRAP:
+ case AMDGPU::G_ATOMICRMW_COND_SUB:
+ case AMDGPU::G_ATOMICRMW_SUB_CLAMP:
case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG: {
OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
OpdsMapping[1] = getValueMappingForPtr(MRI, MI.getOperand(1).getReg());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td b/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
index ed5bae3e4ff61..a4c59b96c9ffd 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
@@ -237,8 +237,6 @@ def : SourceOfDivergence<int_amdgcn_mbcnt_lo>;
def : SourceOfDivergence<int_r600_read_tidig_x>;
def : SourceOfDivergence<int_r600_read_tidig_y>;
def : SourceOfDivergence<int_r600_read_tidig_z>;
-def : SourceOfDivergence<int_amdgcn_atomic_cond_sub_u32>;
-def : SourceOfDivergence<int_amdgcn_global_atomic_csub>;
def : SourceOfDivergence<int_amdgcn_global_atomic_fadd>;
def : SourceOfDivergence<int_amdgcn_global_atomic_fmin>;
def : SourceOfDivergence<int_amdgcn_global_atomic_fmax>;
@@ -270,7 +268,6 @@ def : SourceOfDivergence<int_amdgcn_raw_buffer_atomic_fadd>;
def : SourceOfDivergence<int_amdgcn_raw_buffer_atomic_fmin>;
def : SourceOfDivergence<int_amdgcn_raw_buffer_atomic_fmax>;
def : SourceOfDivergence<int_amdgcn_raw_buffer_atomic_cmpswap>;
-def : SourceOfDivergence<int_amdgcn_raw_buffer_atomic_cond_sub_u32>;
def : SourceOfDivergence<int_amdgcn_raw_ptr_buffer_atomic_swap>;
def : SourceOfDivergence<int_amdgcn_raw_ptr_buffer_atomic_add>;
def : SourceOfDivergence<int_amdgcn_raw_ptr_buffer_atomic_sub>;
@@ -287,7 +284,6 @@ def : SourceOfDivergence<int_amdgcn_raw_ptr_buffer_atomic_fadd>;
def : SourceOfDivergence<int_amdgcn_raw_ptr_buffer_atomic_fmin>;
def : SourceOfDivergence<int_amdgcn_raw_ptr_buffer_atomic_fmax>;
def : SourceOfDivergence<int_amdgcn_raw_ptr_buffer_atomic_cmpswap>;
-def : SourceOfDivergence<int_amdgcn_raw_ptr_buffer_atomic_cond_sub_u32>;
def : SourceOfDivergence<int_amdgcn_struct_buffer_atomic_swap>;
def : SourceOfDivergence<int_amdgcn_struct_buffer_atomic_add>;
def : SourceOfDivergence<int_amdgcn_struct_buffer_atomic_sub>;
@@ -304,7 +300,6 @@ def : SourceOfDivergence<int_amdgcn_struct_buffer_atomic_fadd>;
def : SourceOfDivergence<int_amdgcn_struct_buffer_atomic_fmin>;
def : SourceOfDivergence<int_amdgcn_struct_buffer_atomic_fmax>;
def : SourceOfDivergence<int_amdgcn_struct_buffer_atomic_cmpswap>;
-def : SourceOfDivergence<int_amdgcn_struct_buffer_atomic_cond_sub_u32>;
def : SourceOfDivergence<int_amdgcn_struct_ptr_buffer_atomic_swap>;
def : SourceOfDivergence<int_amdgcn_struct_ptr_buffer_atomic_add>;
def : SourceOfDivergence<int_amdgcn_struct_ptr_buffer_atomic_sub>;
@@ -321,7 +316,6 @@ def : SourceOfDivergence<int_amdgcn_struct_ptr_buffer_atomic_fadd>;
def : SourceOfDivergence<int_amdgcn_struct_ptr_buffer_atomic_fmin>;
def : SourceOfDivergence<int_amdgcn_struct_ptr_buffer_atomic_fmax>;
def : SourceOfDivergence<int_amdgcn_struct_ptr_buffer_atomic_cmpswap>;
-def : SourceOfDivergence<int_amdgcn_struct_ptr_buffer_atomic_cond_sub_u32>;
def : SourceOfDivergence<int_amdgcn_ps_live>;
def : SourceOfDivergence<int_amdgcn_live_mask>;
def : SourceOfDivergence<int_amdgcn_ds_swizzle>;
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index cda4b57d9b0ed..acb1163483063 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -1126,7 +1126,7 @@ defm BUFFER_ATOMIC_DEC_X2 : MUBUF_Pseudo_Atomics <
let OtherPredicates = [HasGFX10_BEncoding] in {
defm BUFFER_ATOMIC_CSUB : MUBUF_Pseudo_Atomics <
- "buffer_atomic_csub", VGPR_32, i32, int_amdgcn_global_atomic_csub
+ "buffer_atomic_csub", VGPR_32, i32
>;
}
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index 219246b71fe80..f075834794ece 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -733,17 +733,6 @@ defm DS_COND_SUB_RTN_U32 : DS_1A1D_RET_mc<"ds_cond_sub_rtn_u32", VGPR_32>;
defm DS_SUB_CLAMP_U32 : DS_1A1D_NORET_mc<"ds_sub_clamp_u32">;
defm DS_SUB_CLAMP_RTN_U32 : DS_1A1D_RET_mc<"ds_sub_clamp_rtn_u32", VGPR_32>;
-multiclass DSAtomicRetNoRetPatIntrinsic_mc<DS_Pseudo inst, DS_Pseudo noRetInst,
- ValueType vt, string frag> {
- def : DSAtomicRetPat<inst, vt,
- !cast<PatFrag>(frag#"_local_addrspace")>;
-
- let OtherPredicates = [HasAtomicCSubNoRtnInsts] in
- def : DSAtomicRetPat<noRetInst, vt,
- !cast<PatFrag>(frag#"_noret_local_addrspace"), /* complexity */ 1>;
-}
-
-defm : DSAtomicRetNoRetPatIntrinsic_mc<DS_COND_SUB_RTN_U32, DS_COND_SUB_U32, i32, "int_amdgcn_atomic_cond_sub_u32">;
} // let SubtargetPredicate = isGFX12Plus
//===----------------------------------------------------------------------===//
@@ -1005,7 +994,34 @@ multiclass DSAtomicRetNoRetPat_mc<DS_Pseudo inst, DS_Pseudo noRetInst,
}
}
+multiclass DSAtomicRetNoRetPatCondSub_mc<DS_Pseudo inst, DS_Pseudo noRetInst,
+ ValueType vt, string frag> {
+ let OtherPredicates = [LDSRequiresM0Init] in {
+ def : DSAtomicRetPat<inst, vt,
+ !cast<PatFrag>(frag#"_local_m0_"#vt)>;
+ let OtherPredicates = [HasAtomicCSubNoRtnInsts] in
+ def : DSAtomicRetPat<noRetInst, vt,
+ !cast<PatFrag>(frag#"_local_m0_noret_"#vt), /* complexity */ 1>;
+ }
+
+ let OtherPredicates = [NotLDSRequiresM0Init] in {
+ def : DSAtomicRetPat<!cast<DS_Pseudo>(!cast<string>(inst)#"_gfx9"), vt,
+ !cast<PatFrag>(frag#"_local_"#vt)>;
+ let OtherPredicates = [HasAtomicCSubNoRtnInsts] in
+ def : DSAtomicRetPat<!cast<DS_Pseudo>(!cast<string>(noRetInst)#"_gfx9"), vt,
+ !cast<PatFrag>(frag#"_local_noret_"#vt), /* complexity */ 1>;
+ }
+ let OtherPredicates = [HasGDS] in {
+ def : DSAtomicRetPat<inst, vt,
+ !cast<PatFrag>(frag#"_region_m0_"#vt),
+ /* complexity */ 0, /* gds */ 1>;
+ let OtherPredicates = [HasAtomicCSubNoRtnInsts] in
+ def : DSAtomicRetPat<noRetInst, vt,
+ !cast<PatFrag>(frag#"_region_m0_noret_"#vt),
+ /* complexity */ 1, /* gds */ 1>;
+ }
+}
let SubtargetPredicate = isGFX6GFX7GFX8GFX9GFX10 in {
// Caution, the order of src and cmp is the *opposite* of the BUFFER_ATOMIC_CMPSWAP opcode.
@@ -1088,6 +1104,18 @@ defm : DSAtomicRetNoRetPat_mc<DS_PK_ADD_RTN_F16, DS_PK_ADD_F16, v2f16, "atomic_l
defm : DSAtomicRetNoRetPat_mc<DS_PK_ADD_RTN_BF16, DS_PK_ADD_BF16, v2bf16, "atomic_load_fadd">;
}
+defm : DSAtomicRetNoRetPat_mc<DS_COND_SUB_RTN_U32, DS_COND_SUB_U32, i32, "atomic_load_sub_clamp">;
+
+let SubtargetPredicate = isGFX12Plus in {
+
+//defm : DSAtomicRetNoRetPat_mc<DS_COND_SUB_RTN_U32, DS_COND_SUB_U32, i32, "atomic_load_cond_sub">;
+
+defm : DSAtomicRetNoRetPatCondSub_mc<DS_COND_SUB_RTN_U32, DS_COND_SUB_U32, i32, "atomic_load_cond_sub">;
+
+defm : DSAtomicRetNoRetPat_mc<DS_SUB_CLAMP_RTN_U32, DS_SUB_CLAMP_U32, i32, "atomic_load_sub_clamp">;
+
+} // let SubtargetPredicate = isGFX12Plus
+
let SubtargetPredicate = isGFX6GFX7GFX8GFX9GFX10 in {
defm : DSAtomicCmpXChgSwapped_mc<DS_CMPST_RTN_B32, DS_CMPST_B32, i32, "atomic_cmp_swap">;
}
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 98054dde398b3..c9ef2e9087c58 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -1108,10 +1108,6 @@ multiclass FlatAtomicNoRtnPatBase <string inst, string node, ValueType vt,
(!cast<FLAT_Pseudo>(inst) VReg_64:$vaddr, getVregSrcForVT<data_vt>.ret:$data, $offset)>;
}
-multiclass FlatAtomicNoRtnPatWithAddrSpace<string inst, string node, string addrSpaceSuffix,
- ValueType vt> :
- FlatAtomicNoRtnPatBase<inst, node # "_noret_" # addrSpaceSuffix, vt, vt>;
-
multiclass FlatAtomicNoRtnPat <string inst, string node, ValueType vt,
ValueType data_vt = vt, bit isIntr = 0> :
FlatAtomicNoRtnPatBase<inst, node # "_noret" # !if(isIntr, "", "_"#vt), vt, data_vt>;
@@ -1126,10 +1122,6 @@ multiclass FlatAtomicRtnPatBase <string inst, string node, ValueType vt,
(!cast<FLAT_Pseudo>(inst#"_RTN") VReg_64:$vaddr, getVregSrcForVT<data_vt>.ret:$data, $offset)>;
}
-multiclass FlatAtomicRtnPatWithAddrSpace<string inst, string intr, string addrSpaceSuffix,
- ValueType vt> :
- FlatAtomicRtnPatBase<inst, intr # "_" # addrSpaceSuffix, vt, vt>;
-
multiclass FlatAtomicRtnPat <string inst, string node, ValueType vt,
ValueType data_vt = vt, bit isIntr = 0> :
FlatAtomicRtnPatBase<inst, node # !if(isIntr, "", "_"#vt), vt, data_vt>;
@@ -1436,14 +1428,13 @@ defm : FlatAtomicPat <"FLAT_ATOMIC_MIN_F64", "atomic_load_fmin_"#as, f64>;
defm : FlatAtomicPat <"FLAT_ATOMIC_MAX_F64", "atomic_load_fmax_"#as, f64>;
}
-} // end foreach as
-
let SubtargetPredicate = isGFX12Plus in {
- defm : FlatAtomicRtnPatWithAddrSpace<"FLAT_ATOMIC_COND_SUB_U32", "int_amdgcn_atomic_cond_sub_u32", "flat_addrspace", i32 >;
+ defm : FlatAtomicRtnPat<"FLAT_ATOMIC_COND_SUB_U32", "atomic_load_cond_sub_"#as, i32 >;
let OtherPredicates = [HasAtomicCSubNoRtnInsts] in
- defm : FlatAtomicNoRtnPatWithAddrSpace<"FLAT_ATOMIC_COND_SUB_U32", "int_amdgcn_atomic_cond_sub_u32", "flat_addrspace", i32>;
+ defm : FlatAtomicNoRtnPat<"FLAT_ATOMIC_COND_SUB_U32", "atomic_load_cond_sub_"#as, i32>;
}
+} // end foreach as
def : FlatStorePat <FLAT_STORE_BYTE, truncstorei8_flat, i16>;
def : FlatStorePat <FLAT_STORE_SHORT, store_flat, i16>;
@@ -1558,10 +1549,10 @@ defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_OR", "atomic_load_or_global", i32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_SWAP", "atomic_swap_global", i32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_CMPSWAP", "AMDGPUatomic_cmp_swap_global", i32, v2i32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_XOR", "atomic_load_xor_global", i32>;
-defm : GlobalFLATAtomicPatsRtn <"GLOBAL_ATOMIC_CSUB", "int_amdgcn_global_atomic_csub", i32, i32, /* isIntr */ 1>;
+defm : GlobalFLATAtomicPatsRtn <"GLOBAL_ATOMIC_CSUB", "atomic_load_sub_clamp_global", i32>;
let OtherPredicates = [HasAtomicCSubNoRtnInsts] in
-defm : GlobalFLATAtomicPatsNoRtn <"GLOBAL_ATOMIC_CSUB", "int_amdgcn_global_atomic_csub", i32, i32, /* isIntr */ 1>;
+defm : GlobalFLATAtomicPatsNoRtn <"GLOBAL_ATOMIC_CSUB", "atomic_load_sub_clamp_global", i32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_ADD_X2", "atomic_load_add_global", i64>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_SUB_X2", "atomic_load_sub_global", i64>;
@@ -1578,10 +1569,10 @@ defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_CMPSWAP_X2", "AMDGPUatomic_cmp_swap_
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_XOR_X2", "atomic_load_xor_global", i64>;
let SubtargetPredicate = isGFX12Plus in {
- defm : GlobalFLATAtomicPatsRtnWithAddrSpace <"GLOBAL_ATOMIC_COND_SUB_U32", "int_amdgcn_atomic_cond_sub_u32", "global_addrspace", i32>;
+ defm : GlobalFLATAtomicPatsRtn <"GLOBAL_ATOMIC_COND_SUB_U32", "atomic_load_cond_sub_global", i32>;
let OtherPredicates = [HasAtomicCSubNoRtnInsts] in
- defm : GlobalFLATAtomicPatsNoRtnWithAddrSpace <"GLOBAL_ATOMIC_COND_SUB_U32", "int_amdgcn_atomic_cond_sub_u32", "global_addrspace", i32>;
+ defm : GlobalFLATAtomicPatsNoRtn <"GLOBAL_ATOMIC_COND_SUB_U32", "atomic_load_cond_sub_global", i32>;
}
let OtherPredicates = [isGFX12Plus] in {
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index 159b2d440b31a..a317977a55b0f 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -2179,6 +2179,8 @@ R600TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
switch (RMW->getOperation()) {
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
+ case AtomicRMWInst::CondSub:
+ case AtomicRMWInst::SubClamp:
// FIXME: Cayman at least appears to have instructions for this, but the
// instruction defintions appear to be missing.
return AtomicExpansionKind::CmpXChg;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 4f8882ed1cc96..0fc242af6d4d9 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -949,6 +949,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
ISD::ATOMIC_LOAD_FMAX,
ISD::ATOMIC_LOAD_UINC_WRAP,
ISD::ATOMIC_LOAD_UDEC_WRAP,
+ ISD::ATOMIC_LOAD_COND_SUB,
+ ISD::ATOMIC_LOAD_SUB_CLAMP,
ISD::INTRINSIC_VOID,
ISD::INTRINSIC_W_CHAIN});
@@ -1317,16 +1319,6 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
return true;
}
- case Intrinsic::amdgcn_global_atomic_csub: {
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(CI.getType());
- Info.ptrVal = CI.getOperand(0);
- Info.align.reset();
- Info.flags |= MachineMemOperand::MOLoad |
- MachineMemOperand::MOStore |
- MachineMemOperand::MOVolatile;
- return true;
- }
case Intrinsic::amdgcn_image_bvh_intersect_ray: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::getVT(CI.getType()); // XXX: what is correct VT?
@@ -1349,7 +1341,6 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::amdgcn_flat_atomic_fmin_num:
case Intrinsic::amdgcn_flat_atomic_fmax_num:
case Intrinsic::amdgcn_global_atomic_fadd_v2bf16:
- case Intrinsic::amdgcn_atomic_cond_sub_u32:
case Intrinsic::amdgcn_flat_atomic_fadd_v2bf16: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::getVT(CI.getType());
@@ -1447,7 +1438,6 @@ bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
Type *&AccessTy) const {
Value *Ptr = nullptr;
switch (II->getIntrinsicID()) {
- case Intrinsic::amdgcn_atomic_cond_sub_u32:
case Intrinsic::amdgcn_ds_append:
case Intrinsic::amdgcn_ds_consume:
case Intrinsic::amdgcn_ds_fmax:
@@ -1460,7 +1450,6 @@ bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
case Intrinsic::amdgcn_flat_atomic_fmax_num:
case Intrinsic::amdgcn_flat_atomic_fmin:
case Intrinsic::amdgcn_flat_atomic_fmin_num:
- case Intrinsic::amdgcn_global_atomic_csub:
case Intrinsic::amdgcn_global_atomic_fadd:
case Intrinsic::amdgcn_global_atomic_fadd_v2bf16:
case Intrinsic::amdgcn_global_atomic_fmax:
@@ -8864,9 +8853,6 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
case Intrinsic::amdgcn_raw_buffer_atomic_dec:
case Intrinsic::amdgcn_raw_ptr_buffer_atomic_dec:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC);
- case Intrinsic::amdgcn_raw_buffer_atomic_cond_sub_u32:
- return lowerRawBufferAtomicIntrin(Op, DAG,
- AMDGPUISD::BUFFER_ATOMIC_COND_SUB_U32);
case Intrinsic::amdgcn_struct_buffer_atomic_swap:
case Intrinsic::amdgcn_struct_ptr_buffer_atomic_swap:
return lowerStructBufferAtomicIntrin(Op, DAG,
@@ -8908,9 +8894,6 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
case Intrinsic::amdgcn_struct_buffer_atomic_dec:
case Intrinsic::amdgcn_struct_ptr_buffer_atomic_dec:
return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC);
- case Intrinsic::amdgcn_struct_buffer_atomic_cond_sub_u32:
- return lowerStructBufferAtomicIntrin(Op, DAG,
- AMDGPUISD::BUFFER_ATOMIC_COND_SUB_U32);
case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap:
case Intrinsic::amdgcn_raw_ptr_buffer_atomic_cmpswap: {
@@ -15808,7 +15791,6 @@ bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode *N,
case AMDGPUISD::BUFFER_ATOMIC_INC:
case AMDGPUISD::BUFFER_ATOMIC_DEC:
case AMDGPUISD::BUFFER_ATOMIC_CMPSWAP:
- case AMDGPUISD::BUFFER_ATOMIC_CSUB:
case AMDGPUISD::BUFFER_ATOMIC_FADD:
case AMDGPUISD::BUFFER_ATOMIC_FMIN:
case AMDGPUISD::BUFFER_ATOMIC_FMAX:
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index c64b3a7c356f2..1183a6329e7b0 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -718,6 +718,8 @@ defm atomic_load_add : SIAtomicM0Glue2 <"LOAD_ADD">;
defm atomic_load_sub : SIAtomicM0Glue2 <"LOAD_SUB">;
defm atomic_load_uinc_wrap : SIAtomicM0Glue2 <"LOAD_UINC_WRAP">;
defm atomic_load_udec_wrap : SIAtomicM0Glue2 <"LOAD_UDEC_WRAP">;
+defm atomic_load_cond_sub : SIAtomicM0Glue2 <"LOAD_COND_SUB">;
+defm atomic_load_sub_clamp : SIAtomicM0Glue2 <"LOAD_SUB_CLAMP">;
defm atomic_load_and : SIAtomicM0Glue2 <"LOAD_AND">;
defm atomic_load_min : SIAtomicM0Glue2 <"LOAD_MIN">;
defm atomic_load_max : SIAtomicM0Glue2 <"LOAD_MAX">;
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 1721287dab4dd..f9a751f72058d 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -4539,7 +4539,9 @@ LoongArchTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
// operations, use CmpXChg to expand.
if (AI->isFloatingPointOperation() ||
AI->getOperation() == AtomicRMWInst::UIncWrap ||
- AI->getOperation() == AtomicRMWInst::UDecWrap)
+ AI->getOperation() == AtomicRMWInst::UDecWrap ||
+ AI->getOperation() == AtomicRMWInst::CondSub ||
+ AI->getOperation() == AtomicRMWInst::SubClamp)
return AtomicExpansionKind::CmpXChg;
unsigned Size = AI->getType()->getPrimitiveSizeInBits();
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 9e56b8522fa63..267116a610506 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -18774,6 +18774,8 @@ PPCTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
switch (AI->getOperation()) {
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
+ case AtomicRMWInst::CondSub:
+ case AtomicRMWInst::SubClamp:
return AtomicExpansionKind::CmpXChg;
default:
return TargetLowering::shouldExpandAtomicRMWInIR(AI);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a02fd5bd1b65e..44cfd652b66c7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20757,7 +20757,9 @@ RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
// forward-progress guarantee.
if (AI->isFloatingPointOperation() ||
AI->getOperation() == AtomicRMWInst::UIncWrap ||
- AI->getOperation() == AtomicRMWInst::UDecWrap)
+ AI->getOperation() == AtomicRMWInst::UDecWrap ||
+ AI->getOperation() == AtomicRMWInst::CondSub ||
+ AI->getOperation() == AtomicRMWInst::SubClamp)
return AtomicExpansionKind::CmpXChg;
// Don't expand forced atomics, we want to have __sync libcalls instead.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 82d2b301d854e..13deedfa7c035 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -30912,6 +30912,8 @@ X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
case AtomicRMWInst::FMin:
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
+ case AtomicRMWInst::CondSub:
+ case AtomicRMWInst::SubClamp:
default:
// These always require a non-trivial set of data operations on x86. We must
// use a cmpxchg loop.
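
For targets without native support (LoongArch, PowerPC, RISC-V and X86 above), AtomicExpand lowers the new operations to a compare-exchange loop. As a rough illustration only, not the exact IR the pass produces, the loop has the following shape in plain C++; expand_atomicrmw and the update callback are hypothetical names, and the per-operation value computation is the one added to LowerAtomic.cpp further down.

  #include <atomic>
  #include <cstdint>
  #include <functional>

  // Rough analogue of the cmpxchg loop emitted for an atomicrmw the target
  // cannot lower directly; 'update' stands in for the per-operation value
  // computation (see the LowerAtomic.cpp hunk below).
  uint32_t expand_atomicrmw(std::atomic<uint32_t> &mem, uint32_t val,
                            const std::function<uint32_t(uint32_t, uint32_t)> &update) {
    uint32_t old = mem.load(std::memory_order_relaxed);
    while (!mem.compare_exchange_weak(old, update(old, val),
                                      std::memory_order_seq_cst,
                                      std::memory_order_relaxed)) {
      // 'old' is reloaded by compare_exchange_weak on failure; just retry.
    }
    return old; // atomicrmw yields the value that was in memory beforehand
  }

The seq_cst/relaxed orderings here are placeholders; the expansion keeps whatever ordering and syncscope the original atomicrmw carried.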
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
index cba282cea72b8..e9ac96f68d60c 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
@@ -41,6 +41,8 @@ bool isIdempotentRMW(AtomicRMWInst& RMWI) {
case AtomicRMWInst::Sub:
case AtomicRMWInst::Or:
case AtomicRMWInst::Xor:
+ case AtomicRMWInst::CondSub:
+ case AtomicRMWInst::SubClamp:
return C->isZero();
case AtomicRMWInst::And:
return C->isMinusOne();
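
Treating cond_sub and sub_clamp with a zero operand as idempotent follows from the update rule added in LowerAtomic.cpp below: with val == 0 the unsigned comparison old >= val always holds and old - 0 == old, so the stored value equals the loaded one for both operations. A small standalone check, illustrative only:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t val = 0; // the operand InstCombine treats as idempotent
    for (uint32_t old : {0u, 1u, 42u, 0xFFFFFFFFu}) {
      uint32_t cond_sub = old >= val ? old - val : val; // new value for cond_sub
      uint32_t sub_clamp = old >= val ? old - val : 0u; // new value for sub_clamp
      assert(cond_sub == old && sub_clamp == old);      // memory is unchanged
    }
    return 0;
  }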
diff --git a/llvm/lib/Transforms/Utils/LowerAtomic.cpp b/llvm/lib/Transforms/Utils/LowerAtomic.cpp
index b203970ef9c5a..f516555d58947 100644
--- a/llvm/lib/Transforms/Utils/LowerAtomic.cpp
+++ b/llvm/lib/Transforms/Utils/LowerAtomic.cpp
@@ -94,6 +94,17 @@ Value *llvm::buildAtomicRMWValue(AtomicRMWInst::BinOp Op,
Value *Or = Builder.CreateOr(CmpEq0, CmpOldGtVal);
return Builder.CreateSelect(Or, Val, Dec, "new");
}
+ case AtomicRMWInst::CondSub: {
+ Value *Cmp = Builder.CreateICmpUGE(Loaded, Val);
+ Value *Sub = Builder.CreateSub(Loaded, Val);
+ return Builder.CreateSelect(Cmp, Sub, Val, "new");
+ }
+ case AtomicRMWInst::SubClamp: {
+ Constant *Zero = ConstantInt::get(Loaded->getType(), 0);
+ Value *Cmp = Builder.CreateICmpUGE(Loaded, Val);
+ Value *Sub = Builder.CreateSub(Loaded, Val);
+ return Builder.CreateSelect(Cmp, Sub, Zero, "new");
+ }
default:
llvm_unreachable("Unknown atomic op");
}
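
These two cases define the scalar value that LowerAtomicPass, and the cmpxchg expansion, store back. Restated as plain C++ with hypothetical helper names, mirroring exactly the selects built above:

  #include <cstdint>

  // CondSub: select(old uge val, old - val, val).
  uint32_t cond_sub_new_value(uint32_t old, uint32_t val) {
    return old >= val ? old - val : val;
  }

  // SubClamp: select(old uge val, old - val, 0), i.e. the subtraction is
  // clamped at zero instead of wrapping.
  uint32_t sub_clamp_new_value(uint32_t old, uint32_t val) {
    return old >= val ? old - val : 0;
  }

In both cases the value returned by the atomicrmw instruction itself is the original memory contents, as with every other atomicrmw operation.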
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/atomics-gmir.mir b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/atomics-gmir.mir
index f2ba7f8b21932..f7dfd530b2285 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/atomics-gmir.mir
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/atomics-gmir.mir
@@ -81,6 +81,12 @@ body: |
; CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_ATOMICRMW_UDEC_WRAP
%20:_(s32) = G_ATOMICRMW_UDEC_WRAP %1, %5
+ ; CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_ATOMICRMW_COND_SUB
+ %21:_(s32) = G_ATOMICRMW_COND_SUB %1, %5
+
+ ; CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_ATOMICRMW_SUB_CLAMP
+ %22:_(s32) = G_ATOMICRMW_SUB_CLAMP %1, %5
+
$vgpr0 = COPY %4(s32)
SI_RETURN implicit $vgpr0
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/atomics.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/atomics.ll
index 15355ea139205..d9e51c39c2042 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/atomics.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/atomics.ll
@@ -15,62 +15,5 @@ define amdgpu_kernel void @test2(ptr %ptr, i32 %cmp, i32 %new) {
ret void
}
-; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %ptr, i32 %val)
-define amdgpu_kernel void @test_atomic_csub_i32(ptr addrspace(1) %ptr, i32 %val) #0 {
- %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %ptr, i32 %val)
- store i32 %ret, ptr addrspace(1) %ptr, align 4
- ret void
-}
-
-; CHECK: DIVERGENT: %val = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p3(ptr addrspace(3) %gep, i32 %in)
-define amdgpu_kernel void @test_ds_atomic_cond_sub_rtn_u32(ptr addrspace(3) %addr, i32 %in, ptr addrspace(3) %use) #0 {
-entry:
- %gep = getelementptr i32, ptr addrspace(3) %addr, i32 4
- %val = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p3(ptr addrspace(3) %gep, i32 %in)
- store i32 %val, ptr addrspace(3) %use
- ret void
-}
-
-; CHECK: DIVERGENT: %val = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr %gep, i32 %in)
-define amdgpu_kernel void @test_flat_atomic_cond_sub_u32(ptr %addr, i32 %in, ptr %use) #0 {
-entry:
- %gep = getelementptr i32, ptr %addr, i32 4
- %val = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr %gep, i32 %in)
- store i32 %val, ptr %use
- ret void
-}
-
-; CHECK: DIVERGENT: %val = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p1(ptr addrspace(1) %gep, i32 %in)
-define amdgpu_kernel void @test_global_atomic_cond_u32(ptr addrspace(1) %addr, i32 %in, ptr addrspace(1) %use) #0 {
-entry:
- %gep = getelementptr i32, ptr addrspace(1) %addr, i32 4
- %val = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p1(ptr addrspace(1) %gep, i32 %in)
- store i32 %val, ptr addrspace(1) %use
- ret void
-}
-
-; CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.raw.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 0)
-define float @test_raw_buffer_atomic_cond_sub_u32(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
-entry:
- %orig = call i32 @llvm.amdgcn.raw.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 0)
- %r = bitcast i32 %orig to float
- ret float %r
-}
-
-; CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.struct.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
-define float @test_struct_buffer_atomic_cond_sub_u32(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
-entry:
- %orig = call i32 @llvm.amdgcn.struct.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
- %r = bitcast i32 %orig to float
- ret float %r
-}
-
-declare i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) nocapture, i32) #1
-declare i32 @llvm.amdgcn.atomic.cond.sub.u32.p3(ptr addrspace(3), i32) #1
-declare i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr, i32) #1
-declare i32 @llvm.amdgcn.atomic.cond.sub.u32.p1(ptr addrspace(1), i32) #1
-declare i32 @llvm.amdgcn.raw.buffer.atomic.cond.sub.u32.i32(i32, <4 x i32>, i32, i32, i32) #1
-declare i32 @llvm.amdgcn.struct.buffer.atomic.cond.sub.u32.i32(i32, <4 x i32>, i32, i32, i32, i32) #1
-
attributes #0 = { nounwind }
attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Assembler/atomic.ll b/llvm/test/Assembler/atomic.ll
index 32fe82ef2268c..f2f6c792fa265 100644
--- a/llvm/test/Assembler/atomic.ll
+++ b/llvm/test/Assembler/atomic.ll
@@ -42,6 +42,16 @@ define void @f(ptr %x) {
; CHECK: atomicrmw volatile udec_wrap ptr %x, i32 10 syncscope("agent") monotonic
atomicrmw volatile udec_wrap ptr %x, i32 10 syncscope("agent") monotonic
+ ; CHECK: atomicrmw volatile cond_sub ptr %x, i32 10 monotonic
+ atomicrmw volatile cond_sub ptr %x, i32 10 monotonic
+ ; CHECK: atomicrmw volatile cond_sub ptr %x, i32 10 syncscope("agent") monotonic
+ atomicrmw volatile cond_sub ptr %x, i32 10 syncscope("agent") monotonic
+
+ ; CHECK: atomicrmw volatile sub_clamp ptr %x, i32 10 monotonic
+ atomicrmw volatile sub_clamp ptr %x, i32 10 monotonic
+ ; CHECK: atomicrmw volatile sub_clamp ptr %x, i32 10 syncscope("agent") monotonic
+ atomicrmw volatile sub_clamp ptr %x, i32 10 syncscope("agent") monotonic
+
; CHECK: fence syncscope("singlethread") release
fence syncscope("singlethread") release
; CHECK: fence seq_cst
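
Besides the new assembler keywords exercised above, the operations can be created programmatically. A sketch assuming the AtomicRMWInst::CondSub / AtomicRMWInst::SubClamp enumerators this patch introduces; emitCondSub is a hypothetical helper, not part of the patch:

  #include "llvm/IR/IRBuilder.h"

  // Builds: atomicrmw cond_sub ptr %Ptr, i32 %Val syncscope("agent") seq_cst, align 4
  llvm::Value *emitCondSub(llvm::IRBuilder<> &B, llvm::Value *Ptr, llvm::Value *Val) {
    return B.CreateAtomicRMW(llvm::AtomicRMWInst::CondSub, Ptr, Val,
                             llvm::MaybeAlign(4),
                             llvm::AtomicOrdering::SequentiallyConsistent,
                             B.getContext().getOrInsertSyncScopeID("agent"));
  }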
diff --git a/llvm/test/Bitcode/amdgcn-atomic.ll b/llvm/test/Bitcode/amdgcn-atomic.ll
index 311bd8863859b..b49f3ac254a1e 100644
--- a/llvm/test/Bitcode/amdgcn-atomic.ll
+++ b/llvm/test/Bitcode/amdgcn-atomic.ll
@@ -249,3 +249,84 @@ define <2 x i16> @upgrade_amdgcn_ds_fadd_v2bf16__missing_args_as_i16(ptr addrspa
}
attributes #0 = { argmemonly nounwind willreturn }
+
+define void @atomic_cond_sub(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
+ ; CHECK: atomicrmw cond_sub ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ %result0 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 0, i1 false)
+
+ ; CHECK: atomicrmw cond_sub ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+ %result1 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 0, i32 0, i1 false)
+
+ ; CHECK: atomicrmw cond_sub ptr addrspace(3) %ptr3, i32 46 syncscope("agent") seq_cst, align 4
+ %result2 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p3(ptr addrspace(3) %ptr3, i32 46, i32 0, i32 0, i1 false)
+ ret void
+}
+
+define void @atomic_sub_clamp(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
+ ; CHECK: atomicrmw sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ %result0 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 0, i1 false)
+
+ ; CHECK: atomicrmw sub_clamp ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+ %result1 = call i32 @llvm.amdgcn.atomic.csub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 0, i32 0, i1 false)
+
+ ; CHECK: atomicrmw sub_clamp ptr addrspace(3) %ptr3, i32 46 syncscope("agent") seq_cst, align 4
+ %result2 = call i32 @llvm.amdgcn.atomic.csub.i32.p3(ptr addrspace(3) %ptr3, i32 46, i32 0, i32 0, i1 false)
+ ret void
+}
+
+; Test handling of some invalid ordering values
+define void @ordering_cond_sub_clamp(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
+ ; CHECK: atomicrmw volatile cond_sub ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ %result0 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p0(ptr %ptr0, i32 42, i32 -1, i32 0, i1 true)
+
+ ; CHECK: atomicrmw volatile cond_sub ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+ %result1 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 0, i32 0, i1 true)
+
+ ; CHECK: atomicrmw cond_sub ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+ %result2 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 1, i32 0, i1 false)
+
+ ; CHECK: atomicrmw volatile cond_sub ptr addrspace(1) %ptr1, i32 43 syncscope("agent") monotonic, align 4
+ %result3 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 2, i32 0, i1 true)
+
+ ; CHECK: atomicrmw cond_sub ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+ %result4 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 3, i32 0, i1 false)
+
+ ; CHECK: atomicrmw volatile sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ %result5 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 4, i1 true)
+
+ ; CHECK: atomicrmw sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ %result6 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 5, i1 false)
+
+ ; CHECK: atomicrmw volatile sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ %result7 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 6, i1 true)
+
+ ; CHECK: atomicrmw sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ %result8 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 7, i1 false)
+
+ ; CHECK: = atomicrmw volatile sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ %result9 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 8, i1 true)
+
+ ; CHECK: = atomicrmw volatile sub_clamp ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+ %result10 = call i32 @llvm.amdgcn.atomic.csub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 3, i32 0, i1 true)
+ ret void
+}
+
+define void @immarg_violations_sub_clamp(ptr %ptr0, i32 %val32, i1 %val1) {
+ ; CHECK: atomicrmw sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ %result0 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 %val32, i32 0, i1 false)
+
+ ; CHECK: atomicrmw sub_clamp ptr %ptr0, i32 42 syncscope("agent") monotonic, align 4
+ %result1 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 2, i32 %val32, i1 false)
+
+ ; CHECK: atomicrmw volatile sub_clamp ptr %ptr0, i32 42 syncscope("agent") monotonic, align 4
+ %result2 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 2, i32 0, i1 %val1)
+ ret void
+}
+
+declare i32 @llvm.amdgcn.atomic.cond.sub.i32.p1(ptr addrspace(1) nocapture, i32, i32 immarg, i32 immarg, i1 immarg) #0
+declare i32 @llvm.amdgcn.atomic.cond.sub.i32.p3(ptr addrspace(3) nocapture, i32, i32 immarg, i32 immarg, i1 immarg) #0
+declare i32 @llvm.amdgcn.atomic.cond.sub.i32.p0(ptr nocapture, i32, i32 immarg, i32 immarg, i1 immarg) #0
+
+declare i32 @llvm.amdgcn.atomic.csub.i32.p1(ptr addrspace(1) nocapture, i32, i32 immarg, i32 immarg, i1 immarg) #0
+declare i32 @llvm.amdgcn.atomic.csub.i32.p3(ptr addrspace(3) nocapture, i32, i32 immarg, i32 immarg, i1 immarg) #0
+declare i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr nocapture, i32, i32 immarg, i32 immarg, i1 immarg) #0
diff --git a/llvm/test/Bitcode/compatibility.ll b/llvm/test/Bitcode/compatibility.ll
index e437c37d8d1c8..1c424cdaa1016 100644
--- a/llvm/test/Bitcode/compatibility.ll
+++ b/llvm/test/Bitcode/compatibility.ll
@@ -906,6 +906,34 @@ define void @uinc_udec_wrap_atomics(ptr %word) {
ret void
}
+define void @cond_sub_clamp_atomics(ptr %word) {
+; CHECK: %atomicrmw.condsub0 = atomicrmw cond_sub ptr %word, i32 64 monotonic
+ %atomicrmw.condsub0 = atomicrmw cond_sub ptr %word, i32 64 monotonic
+
+; CHECK: %atomicrmw.condsub1 = atomicrmw cond_sub ptr %word, i32 128 seq_cst
+ %atomicrmw.condsub1 = atomicrmw cond_sub ptr %word, i32 128 seq_cst
+
+; CHECK: %atomicrmw.condsub2 = atomicrmw volatile cond_sub ptr %word, i32 128 seq_cst
+ %atomicrmw.condsub2 = atomicrmw volatile cond_sub ptr %word, i32 128 seq_cst
+
+; CHECK: %atomicrmw.condsub0.syncscope = atomicrmw cond_sub ptr %word, i32 27 syncscope("agent") monotonic
+ %atomicrmw.condsub0.syncscope = atomicrmw cond_sub ptr %word, i32 27 syncscope("agent") monotonic
+
+; CHECK: %atomicrmw.subclamp0 = atomicrmw sub_clamp ptr %word, i32 99 monotonic
+ %atomicrmw.subclamp0 = atomicrmw sub_clamp ptr %word, i32 99 monotonic
+
+; CHECK: %atomicrmw.subclamp1 = atomicrmw sub_clamp ptr %word, i32 12 seq_cst
+ %atomicrmw.subclamp1 = atomicrmw sub_clamp ptr %word, i32 12 seq_cst
+
+; CHECK: %atomicrmw.subclamp2 = atomicrmw volatile sub_clamp ptr %word, i32 12 seq_cst
+ %atomicrmw.subclamp2 = atomicrmw volatile sub_clamp ptr %word, i32 12 seq_cst
+
+; CHECK: %atomicrmw.subclamp0.syncscope = atomicrmw sub_clamp ptr %word, i32 5 syncscope("system") monotonic
+ %atomicrmw.subclamp0.syncscope = atomicrmw sub_clamp ptr %word, i32 5 syncscope("system") monotonic
+
+ ret void
+}
+
define void @pointer_atomics(ptr %word) {
; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, ptr null monotonic
%atomicrmw.xchg = atomicrmw xchg ptr %word, ptr null monotonic
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index f03491924f7f4..6be8c3a180c33 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -261,6 +261,12 @@
# DEBUG-NEXT: G_ATOMICRMW_UDEC_WRAP (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: G_ATOMICRMW_COND_SUB (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: G_ATOMICRMW_SUB_CLAMP (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_FENCE (opcode {{[0-9]+}}): 0 type indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/AArch64/atomicrmw-cond-sub-clamp.ll
new file mode 100644
index 0000000000000..b59505faccdeb
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-cond-sub-clamp.ll
@@ -0,0 +1,142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
+
+define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: .LBB0_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldaxrb w8, [x0]
+; CHECK-NEXT: sub w9, w8, w1
+; CHECK-NEXT: cmp w8, w1, uxtb
+; CHECK-NEXT: csel w9, w9, w1, hs
+; CHECK-NEXT: stlxrb w10, w9, [x0]
+; CHECK-NEXT: cbnz w10, .LBB0_1
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: ret
+ %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: .LBB1_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldaxrh w8, [x0]
+; CHECK-NEXT: sub w9, w8, w1
+; CHECK-NEXT: cmp w8, w1, uxth
+; CHECK-NEXT: csel w9, w9, w1, hs
+; CHECK-NEXT: stlxrh w10, w9, [x0]
+; CHECK-NEXT: cbnz w10, .LBB1_1
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: ret
+ %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: .LBB2_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldaxr w8, [x0]
+; CHECK-NEXT: subs w9, w8, w1
+; CHECK-NEXT: csel w9, w9, w1, hs
+; CHECK-NEXT: stlxr w10, w9, [x0]
+; CHECK-NEXT: cbnz w10, .LBB2_1
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: ret
+ %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x8, x0
+; CHECK-NEXT: .LBB3_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldaxr x0, [x8]
+; CHECK-NEXT: subs x9, x0, x1
+; CHECK-NEXT: csel x9, x9, x1, hs
+; CHECK-NEXT: stlxr w10, x9, [x8]
+; CHECK-NEXT: cbnz w10, .LBB3_1
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: ret
+ %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
+
+define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: .LBB4_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldaxrb w8, [x0]
+; CHECK-NEXT: sub w9, w8, w1
+; CHECK-NEXT: cmp w8, w1, uxtb
+; CHECK-NEXT: csel w9, w9, wzr, hs
+; CHECK-NEXT: stlxrb w10, w9, [x0]
+; CHECK-NEXT: cbnz w10, .LBB4_1
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: ret
+ %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: .LBB5_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldaxrh w8, [x0]
+; CHECK-NEXT: sub w9, w8, w1
+; CHECK-NEXT: cmp w8, w1, uxth
+; CHECK-NEXT: csel w9, w9, wzr, hs
+; CHECK-NEXT: stlxrh w10, w9, [x0]
+; CHECK-NEXT: cbnz w10, .LBB5_1
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: ret
+ %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: .LBB6_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldaxr w8, [x0]
+; CHECK-NEXT: subs w9, w8, w1
+; CHECK-NEXT: csel w9, w9, wzr, hs
+; CHECK-NEXT: stlxr w10, w9, [x0]
+; CHECK-NEXT: cbnz w10, .LBB6_1
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: ret
+ %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x8, x0
+; CHECK-NEXT: .LBB7_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldaxr x0, [x8]
+; CHECK-NEXT: subs x9, x0, x1
+; CHECK-NEXT: csel x9, x9, xzr, hs
+; CHECK-NEXT: stlxr w10, x9, [x8]
+; CHECK-NEXT: cbnz w10, .LBB7_1
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: ret
+ %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
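
On AArch64 the new operations lower to an LL/SC loop (ldaxr/stlxr) rather than a cmpxchg loop, and for the sub-word cases the comparison is done on the zero-extended operand in a 32-bit register (the cmp ..., uxtb / uxth above) before csel picks the difference, the operand (cond_sub) or zero (sub_clamp). A rough C++ restatement of the i8 value stored back by the cond_sub loop; the helper name is hypothetical:

  #include <cstdint>

  // Value written back by the ldaxrb / cmp uxtb / csel / stlxrb sequence for
  // 'atomicrmw cond_sub' on i8: widen both operands, compare, subtract, and
  // keep only the low byte.
  uint8_t cond_sub_i8_new_value(uint8_t old, uint8_t val) {
    uint32_t o = old, v = val;            // zero-extend into 32-bit registers
    uint32_t result = o >= v ? o - v : v; // csel ..., hs
    return static_cast<uint8_t>(result);  // stlxrb stores the low 8 bits
  }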
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_cond_sub.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_cond_sub.ll
new file mode 100644
index 0000000000000..ceefb5da1e3cc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_cond_sub.ll
@@ -0,0 +1,109 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX12
+
+define i32 @global_atomic_cond_sub(ptr addrspace(1) %ptr, i32 %data) {
+; GFX12-LABEL: global_atomic_cond_sub:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw cond_sub ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
+ ret i32 %ret
+}
+
+define i32 @global_atomic_cond_sub_offset(ptr addrspace(1) %ptr, i32 %data) {
+; GFX12-LABEL: global_atomic_cond_sub_offset:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ ret i32 %ret
+}
+
+define void @global_atomic_cond_sub_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX12-LABEL: global_atomic_cond_sub_nortn:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %ret = atomicrmw cond_sub ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
+ ret void
+}
+
+define void @global_atomic_cond_sub_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX12-LABEL: global_atomic_cond_sub_offset_nortn:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: global_atomic_cond_sub_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_cond_sub_sgpr_base_offset(ptr addrspace(1) %ptr, i32 %data) {
+; GFX12-LABEL: global_atomic_cond_sub_sgpr_base_offset:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
+; GFX12-NEXT: global_atomic_cond_sub_u32 v0, v1, v0, s[0:1] offset:4096 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: global_store_b32 v[0:1], v0, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ store i32 %ret, ptr addrspace(1) undef
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_cond_sub_sgpr_base_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX12-LABEL: global_atomic_cond_sub_sgpr_base_offset_nortn:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
+; GFX12-NEXT: global_atomic_cond_sub_u32 v0, v1, v0, s[0:1] offset:4096 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-NEXT: s_endpgm
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
+ %ret = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ ret void
+}
+
+attributes #0 = { nounwind willreturn }
+attributes #1 = { argmemonly nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.global.atomic.csub.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_sub_clamp.ll
similarity index 66%
rename from llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.global.atomic.csub.ll
rename to llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_sub_clamp.ll
index ade6e55b482bb..1cd6dcb193964 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.global.atomic.csub.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_sub_clamp.ll
@@ -4,134 +4,166 @@
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX11
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX12
-define i32 @global_atomic_csub(ptr addrspace(1) %ptr, i32 %data) {
-; GFX10-LABEL: global_atomic_csub:
+define i32 @global_atomic_sub_clamp(ptr addrspace(1) %ptr, i32 %data) {
+; GFX10-LABEL: global_atomic_sub_clamp:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_atomic_csub:
+; GFX11-LABEL: global_atomic_sub_clamp:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: global_atomic_csub:
+; GFX12-LABEL: global_atomic_sub_clamp:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: s_wait_expcnt 0x0
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_setpc_b64 s[30:31]
- %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %ptr, i32 %data)
+ %ret = atomicrmw sub_clamp ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
ret i32 %ret
}
-define i32 @global_atomic_csub_offset(ptr addrspace(1) %ptr, i32 %data) {
-; GFX10-LABEL: global_atomic_csub_offset:
+define i32 @global_atomic_sub_clamp_offset(ptr addrspace(1) %ptr, i32 %data) {
+; GFX10-LABEL: global_atomic_sub_clamp_offset:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_atomic_csub_offset:
+; GFX11-LABEL: global_atomic_sub_clamp_offset:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: global_atomic_csub_offset:
+; GFX12-LABEL: global_atomic_sub_clamp_offset:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: s_wait_expcnt 0x0
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep, i32 %data)
+ %ret = atomicrmw sub_clamp ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
ret i32 %ret
}
-define void @global_atomic_csub_nortn(ptr addrspace(1) %ptr, i32 %data) {
-; GFX10-LABEL: global_atomic_csub_nortn:
+define void @global_atomic_sub_clamp_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX10-LABEL: global_atomic_sub_clamp_nortn:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_atomic_csub_nortn:
+; GFX11-LABEL: global_atomic_sub_clamp_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: global_atomic_csub_nortn:
+; GFX12-LABEL: global_atomic_sub_clamp_nortn:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: s_wait_expcnt 0x0
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_setpc_b64 s[30:31]
- %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %ptr, i32 %data)
+ %ret = atomicrmw sub_clamp ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
ret void
}
-define void @global_atomic_csub_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
-; GFX10-LABEL: global_atomic_csub_offset_nortn:
+define void @global_atomic_sub_clamp_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX10-LABEL: global_atomic_sub_clamp_offset_nortn:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: global_atomic_csub v0, v[0:1], v2, off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_atomic_csub_offset_nortn:
+; GFX11-LABEL: global_atomic_sub_clamp_offset_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: global_atomic_csub_u32 v0, v[0:1], v2, off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: global_atomic_csub_offset_nortn:
+; GFX12-LABEL: global_atomic_sub_clamp_offset_nortn:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: s_wait_expcnt 0x0
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v[0:1], v2, off offset:4096 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep, i32 %data)
+ %ret = atomicrmw sub_clamp ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
ret void
}
-define amdgpu_kernel void @global_atomic_csub_sgpr_base_offset(ptr addrspace(1) %ptr, i32 %data) {
-; GFX10-LABEL: global_atomic_csub_sgpr_base_offset:
+define amdgpu_kernel void @global_atomic_sub_clamp_sgpr_base_offset(ptr addrspace(1) %ptr, i32 %data) {
+; GFX10-LABEL: global_atomic_sub_clamp_sgpr_base_offset:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dword s2, s[4:5], 0x8
@@ -141,10 +173,12 @@ define amdgpu_kernel void @global_atomic_csub_sgpr_base_offset(ptr addrspace(1)
; GFX10-NEXT: v_mov_b32_e32 v0, s2
; GFX10-NEXT: global_atomic_csub v0, v1, v0, s[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dword v[0:1], v0, off
; GFX10-NEXT: s_endpgm
;
-; GFX11-LABEL: global_atomic_csub_sgpr_base_offset:
+; GFX11-LABEL: global_atomic_sub_clamp_sgpr_base_offset:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x8
@@ -153,30 +187,33 @@ define amdgpu_kernel void @global_atomic_csub_sgpr_base_offset(ptr addrspace(1)
; GFX11-NEXT: v_dual_mov_b32 v1, 0x1000 :: v_dual_mov_b32 v0, s2
; GFX11-NEXT: global_atomic_csub_u32 v0, v1, v0, s[0:1] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b32 v[0:1], v0, off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
-; GFX12-LABEL: global_atomic_csub_sgpr_base_offset:
+; GFX12-LABEL: global_atomic_sub_clamp_sgpr_base_offset:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v1, v0, s[0:1] offset:4096 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b32 v[0:1], v0, off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep, i32 %data)
+ %ret = atomicrmw sub_clamp ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
store i32 %ret, ptr addrspace(1) undef
ret void
}
-define amdgpu_kernel void @global_atomic_csub_sgpr_base_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
-; GFX10-LABEL: global_atomic_csub_sgpr_base_offset_nortn:
+define amdgpu_kernel void @global_atomic_sub_clamp_sgpr_base_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX10-LABEL: global_atomic_sub_clamp_sgpr_base_offset_nortn:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dword s2, s[4:5], 0x8
@@ -185,9 +222,12 @@ define amdgpu_kernel void @global_atomic_csub_sgpr_base_offset_nortn(ptr addrspa
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v0, s2
; GFX10-NEXT: global_atomic_csub v0, v1, v0, s[0:1] glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
-; GFX11-LABEL: global_atomic_csub_sgpr_base_offset_nortn:
+; GFX11-LABEL: global_atomic_sub_clamp_sgpr_base_offset_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x8
@@ -195,21 +235,24 @@ define amdgpu_kernel void @global_atomic_csub_sgpr_base_offset_nortn(ptr addrspa
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v1, 0x1000 :: v_dual_mov_b32 v0, s2
; GFX11-NEXT: global_atomic_csub_u32 v0, v1, v0, s[0:1] glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
;
-; GFX12-LABEL: global_atomic_csub_sgpr_base_offset_nortn:
+; GFX12-LABEL: global_atomic_sub_clamp_sgpr_base_offset_nortn:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
; GFX12-NEXT: global_atomic_sub_clamp_u32 v0, v1, v0, s[0:1] offset:4096 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep, i32 %data)
+ %ret = atomicrmw sub_clamp ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
ret void
}
-declare i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) nocapture, i32) #1
-
attributes #0 = { nounwind willreturn }
attributes #1 = { argmemonly nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll b/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll
index 2c69ae58f0e61..7685e0a58e7ce 100644
--- a/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll
@@ -2,10 +2,6 @@
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12-SDAG %s
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12-GISEL %s
-declare i32 @llvm.amdgcn.atomic.cond.sub.u32.p3(ptr addrspace(3), i32)
-declare i32 @llvm.amdgcn.atomic.cond.sub.u32.p1(ptr addrspace(1), i32)
-declare i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr, i32)
-
define amdgpu_kernel void @flat_atomic_cond_sub_no_rtn_u32(ptr %addr, i32 %in) {
; GFX12-SDAG-LABEL: flat_atomic_cond_sub_no_rtn_u32:
; GFX12-SDAG: ; %bb.0: ; %entry
@@ -14,6 +10,8 @@ define amdgpu_kernel void @flat_atomic_cond_sub_no_rtn_u32(ptr %addr, i32 %in) {
; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX12-SDAG-NEXT: v_mov_b32_e32 v2, s2
; GFX12-SDAG-NEXT: flat_atomic_cond_sub_u32 v0, v[0:1], v2 offset:-16 th:TH_ATOMIC_RETURN
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
; GFX12-SDAG-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: flat_atomic_cond_sub_no_rtn_u32:
@@ -23,10 +21,12 @@ define amdgpu_kernel void @flat_atomic_cond_sub_no_rtn_u32(ptr %addr, i32 %in) {
; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v1, s1
; GFX12-GISEL-NEXT: flat_atomic_cond_sub_u32 v0, v[0:1], v2 offset:-16 th:TH_ATOMIC_RETURN
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %addr, i32 -4
- %unused = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr %gep, i32 %in)
+ %unused = atomicrmw cond_sub ptr %gep, i32 %in seq_cst
ret void
}
@@ -38,6 +38,8 @@ define amdgpu_kernel void @flat_atomic_cond_sub_no_rtn_u32_forced(ptr %addr, i32
; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX12-SDAG-NEXT: v_mov_b32_e32 v2, s2
; GFX12-SDAG-NEXT: flat_atomic_cond_sub_u32 v[0:1], v2 offset:-16
+; GFX12-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
; GFX12-SDAG-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: flat_atomic_cond_sub_no_rtn_u32_forced:
@@ -47,10 +49,12 @@ define amdgpu_kernel void @flat_atomic_cond_sub_no_rtn_u32_forced(ptr %addr, i32
; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v1, s1
; GFX12-GISEL-NEXT: flat_atomic_cond_sub_u32 v[0:1], v2 offset:-16
+; GFX12-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %addr, i32 -4
- %unused = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr %gep, i32 %in)
+ %unused = atomicrmw cond_sub ptr %gep, i32 %in seq_cst
ret void
}
@@ -64,8 +68,9 @@ define amdgpu_kernel void @flat_atomic_cond_sub_rtn_u32(ptr %addr, i32 %in, ptr
; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX12-SDAG-NEXT: v_mov_b32_e32 v2, s6
; GFX12-SDAG-NEXT: flat_atomic_cond_sub_u32 v2, v[0:1], v2 offset:16 th:TH_ATOMIC_RETURN
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX12-SDAG-NEXT: flat_store_b32 v[0:1], v2
; GFX12-SDAG-NEXT: s_endpgm
;
@@ -78,13 +83,14 @@ define amdgpu_kernel void @flat_atomic_cond_sub_rtn_u32(ptr %addr, i32 %in, ptr
; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v1, s5
; GFX12-GISEL-NEXT: flat_atomic_cond_sub_u32 v2, v[0:1], v2 offset:16 th:TH_ATOMIC_RETURN
-; GFX12-GISEL-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX12-GISEL-NEXT: flat_store_b32 v[0:1], v2
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %addr, i32 4
- %val = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr %gep, i32 %in)
+ %val = atomicrmw cond_sub ptr %gep, i32 %in seq_cst
store i32 %val, ptr %use
ret void
}
@@ -96,6 +102,8 @@ define amdgpu_kernel void @global_atomic_cond_sub_no_rtn_u32(ptr addrspace(1) %a
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v0, v0, v1, s[0:1] offset:-16 th:TH_ATOMIC_RETURN
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
; GFX12-SDAG-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: global_atomic_cond_sub_no_rtn_u32:
@@ -104,10 +112,12 @@ define amdgpu_kernel void @global_atomic_cond_sub_no_rtn_u32(ptr addrspace(1) %a
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v0, v1, v0, s[0:1] offset:-16 th:TH_ATOMIC_RETURN
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %addr, i32 -4
- %unused = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p1(ptr addrspace(1) %gep, i32 %in)
+ %unused = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %in seq_cst
ret void
}
@@ -118,8 +128,8 @@ define amdgpu_kernel void @global_atomic_cond_sub_no_rtn_u32_forced(ptr addrspac
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v0, v1, s[0:1] offset:-16
-; GFX12-SDAG-NEXT: s_nop 0
-; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
; GFX12-SDAG-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: global_atomic_cond_sub_no_rtn_u32_forced:
@@ -128,12 +138,12 @@ define amdgpu_kernel void @global_atomic_cond_sub_no_rtn_u32_forced(ptr addrspac
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v1, v0, s[0:1] offset:-16
-; GFX12-GISEL-NEXT: s_nop 0
-; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %addr, i32 -4
- %unused = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p1(ptr addrspace(1) %gep, i32 %in)
+ %unused = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %in seq_cst
ret void
}
@@ -147,6 +157,7 @@ define amdgpu_kernel void @global_atomic_cond_sub_rtn_u32(ptr addrspace(1) %addr
; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s6
; GFX12-SDAG-NEXT: global_atomic_cond_sub_u32 v1, v0, v1, s[4:5] offset:16 th:TH_ATOMIC_RETURN
; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX12-SDAG-NEXT: s_nop 0
; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -161,13 +172,14 @@ define amdgpu_kernel void @global_atomic_cond_sub_rtn_u32(ptr addrspace(1) %addr
; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s6
; GFX12-GISEL-NEXT: global_atomic_cond_sub_u32 v0, v1, v0, s[4:5] offset:16 th:TH_ATOMIC_RETURN
; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SYS
; GFX12-GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX12-GISEL-NEXT: s_nop 0
; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %addr, i32 4
- %val = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p1(ptr addrspace(1) %gep, i32 %in)
+ %val = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %in seq_cst
store i32 %val, ptr addrspace(1) %use
ret void
}
@@ -181,6 +193,8 @@ define amdgpu_kernel void @ds_cond_sub_no_rtn_u32(ptr addrspace(3) %addr, i32 %i
; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX12-SDAG-NEXT: ds_cond_sub_rtn_u32 v0, v0, v1
+; GFX12-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SE
; GFX12-SDAG-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: ds_cond_sub_no_rtn_u32:
@@ -191,10 +205,12 @@ define amdgpu_kernel void @ds_cond_sub_no_rtn_u32(ptr addrspace(3) %addr, i32 %i
; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX12-GISEL-NEXT: ds_cond_sub_rtn_u32 v0, v0, v1
+; GFX12-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SE
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(3) %addr, i32 -4
- %unused = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p3(ptr addrspace(3) %gep, i32 %in)
+ %unused = atomicrmw cond_sub ptr addrspace(3) %gep, i32 %in seq_cst
ret void
}
@@ -207,6 +223,8 @@ define amdgpu_kernel void @ds_cond_sub_no_rtn_u32_forced(ptr addrspace(3) %addr,
; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX12-SDAG-NEXT: ds_cond_sub_u32 v0, v1
+; GFX12-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SE
; GFX12-SDAG-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: ds_cond_sub_no_rtn_u32_forced:
@@ -217,10 +235,12 @@ define amdgpu_kernel void @ds_cond_sub_no_rtn_u32_forced(ptr addrspace(3) %addr,
; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX12-GISEL-NEXT: ds_cond_sub_u32 v0, v1
+; GFX12-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SE
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(3) %addr, i32 -4
- %unused = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p3(ptr addrspace(3) %gep, i32 %in)
+ %unused = atomicrmw cond_sub ptr addrspace(3) %gep, i32 %in seq_cst
ret void
}
@@ -231,8 +251,9 @@ define amdgpu_kernel void @ds_cond_sub_rtn_u32(ptr addrspace(3) %addr, i32 %in,
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX12-SDAG-NEXT: ds_cond_sub_rtn_u32 v0, v0, v1 offset:16
-; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s2
; GFX12-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SE
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s2
; GFX12-SDAG-NEXT: ds_store_b32 v1, v0
; GFX12-SDAG-NEXT: s_endpgm
;
@@ -242,13 +263,14 @@ define amdgpu_kernel void @ds_cond_sub_rtn_u32(ptr addrspace(3) %addr, i32 %in,
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: v_dual_mov_b32 v0, s1 :: v_dual_mov_b32 v1, s0
; GFX12-GISEL-NEXT: ds_cond_sub_rtn_u32 v0, v1, v0 offset:16
-; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, s2
; GFX12-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX12-GISEL-NEXT: global_inv scope:SCOPE_SE
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, s2
; GFX12-GISEL-NEXT: ds_store_b32 v1, v0
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(3) %addr, i32 4
- %val = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p3(ptr addrspace(3) %gep, i32 %in)
+ %val = atomicrmw cond_sub ptr addrspace(3) %gep, i32 %in seq_cst
store i32 %val, ptr addrspace(3) %use
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx1030.ll b/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx1030.ll
index 1588dde19cfb7..c679e3b57bc2a 100644
--- a/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx1030.ll
+++ b/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx1030.ll
@@ -13,7 +13,7 @@ define amdgpu_kernel void @test_sink_small_offset_global_atomic_csub_i32(ptr add
; OPT-NEXT: br i1 [[CMP]], label [[ENDIF:%.*]], label [[IF:%.*]]
; OPT: if:
; OPT-NEXT: [[IN_GEP:%.*]] = getelementptr i32, ptr addrspace(1) [[IN:%.*]], i32 7
-; OPT-NEXT: [[VAL:%.*]] = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) [[IN_GEP]], i32 2)
+; OPT-NEXT: [[VAL:%.*]] = atomicrmw sub_clamp ptr addrspace(1) [[IN_GEP]], i32 2 seq_cst, align 4
; OPT-NEXT: br label [[ENDIF]]
; OPT: endif:
; OPT-NEXT: [[X:%.*]] = phi i32 [ [[VAL]], [[IF]] ], [ 0, [[ENTRY:%.*]] ]
@@ -36,10 +36,13 @@ define amdgpu_kernel void @test_sink_small_offset_global_atomic_csub_i32(ptr add
; GCN-NEXT: v_mov_b32_e32 v1, 2
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_atomic_csub v0, v0, v1, s[2:3] offset:28 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_gl1_inv
+; GCN-NEXT: buffer_gl0_inv
; GCN-NEXT: .LBB0_2: ; %endif
; GCN-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GCN-NEXT: v_mov_b32_e32 v1, 0x3d0800
-; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_store_dword v1, v0, s[0:1] offset:252
; GCN-NEXT: s_endpgm
entry:
@@ -49,7 +52,7 @@ entry:
if:
%in.gep = getelementptr i32, ptr addrspace(1) %in, i32 7
- %val = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %in.gep, i32 2)
+ %val = atomicrmw sub_clamp ptr addrspace(1) %in.gep, i32 2 seq_cst
br label %endif
endif:
@@ -62,7 +65,6 @@ done:
ret void
}
-declare i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) nocapture, i32) #0
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
attributes #0 = { argmemonly nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx1030.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx1030.ll
index 79de55eb63bf8..f2e889f73314e 100644
--- a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx1030.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx1030.ll
@@ -10,10 +10,12 @@ define amdgpu_ps float @global_csub_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GCN: ; %bb.0:
; GCN-NEXT: global_atomic_csub v0, v0, v1, s[2:3] glc
; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_gl1_inv
+; GCN-NEXT: buffer_gl0_inv
; GCN-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep0, i32 %data)
+ %rtn = atomicrmw sub_clamp ptr addrspace(1) %gep0, i32 %data seq_cst
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -23,11 +25,13 @@ define amdgpu_ps float @global_csub_saddr_i32_rtn_neg128(ptr addrspace(1) inreg
; GCN: ; %bb.0:
; GCN-NEXT: global_atomic_csub v0, v0, v1, s[2:3] offset:-128 glc
; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_gl1_inv
+; GCN-NEXT: buffer_gl0_inv
; GCN-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep1, i32 %data)
+ %rtn = atomicrmw sub_clamp ptr addrspace(1) %gep1, i32 %data seq_cst
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -36,10 +40,13 @@ define amdgpu_ps void @global_csub_saddr_i32_nortn(ptr addrspace(1) inreg %sbase
; GCN-LABEL: global_csub_saddr_i32_nortn:
; GCN: ; %bb.0:
; GCN-NEXT: global_atomic_csub v0, v0, v1, s[2:3] glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_gl1_inv
+; GCN-NEXT: buffer_gl0_inv
; GCN-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep0, i32 %data)
+ %unused = atomicrmw sub_clamp ptr addrspace(1) %gep0, i32 %data seq_cst
ret void
}
@@ -47,11 +54,14 @@ define amdgpu_ps void @global_csub_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GCN-LABEL: global_csub_saddr_i32_nortn_neg128:
; GCN: ; %bb.0:
; GCN-NEXT: global_atomic_csub v0, v0, v1, s[2:3] offset:-128 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_gl1_inv
+; GCN-NEXT: buffer_gl0_inv
; GCN-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %gep1, i32 %data)
+ %unused = atomicrmw sub_clamp ptr addrspace(1) %gep1, i32 %data seq_cst
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.cond.sub.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.cond.sub.ll
deleted file mode 100644
index 9b63a8a3efcf9..0000000000000
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.cond.sub.ll
+++ /dev/null
@@ -1,219 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX12
-
-define float @raw_buffer_atomic_cond_sub_return(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
-; GFX12-LABEL: raw_buffer_atomic_cond_sub_return:
-; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v0, s4
-; GFX12-NEXT: buffer_atomic_cond_sub_u32 v0, off, s[0:3], null th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
-main_body:
- %orig = call i32 @llvm.amdgcn.raw.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 0)
- %r = bitcast i32 %orig to float
- ret float %r
-}
-
-define void @raw_buffer_atomic_cond_sub_no_return(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
-; GFX12-LABEL: raw_buffer_atomic_cond_sub_no_return:
-; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v0, s4
-; GFX12-NEXT: buffer_atomic_cond_sub_u32 v0, off, s[0:3], null th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
-main_body:
- %unused = call i32 @llvm.amdgcn.raw.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 0)
- ret void
-}
-
-define void @raw_buffer_atomic_cond_sub_no_return_forced(<4 x i32> inreg %rsrc, i32 inreg %data) #1 {
-; GFX12-LABEL: raw_buffer_atomic_cond_sub_no_return_forced:
-; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v0, s4
-; GFX12-NEXT: buffer_atomic_cond_sub_u32 v0, off, s[0:3], null
-; GFX12-NEXT: s_setpc_b64 s[30:31]
-main_body:
- %unused = call i32 @llvm.amdgcn.raw.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 0)
- ret void
-}
-
-define float @raw_buffer_atomic_cond_sub_imm_soff_return(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
-; GFX12-LABEL: raw_buffer_atomic_cond_sub_imm_soff_return:
-; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v0, s4
-; GFX12-NEXT: s_mov_b32 s4, 4
-; GFX12-NEXT: buffer_atomic_cond_sub_u32 v0, off, s[0:3], s4 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
-main_body:
- %orig = call i32 @llvm.amdgcn.raw.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 4, i32 0)
- %r = bitcast i32 %orig to float
- ret float %r
-}
-
-define void @raw_buffer_atomic_cond_sub_imm_soff_no_return(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
-; GFX12-LABEL: raw_buffer_atomic_cond_sub_imm_soff_no_return:
-; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v0, s4
-; GFX12-NEXT: s_mov_b32 s4, 4
-; GFX12-NEXT: buffer_atomic_cond_sub_u32 v0, off, s[0:3], s4 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
-main_body:
- %unused = call i32 @llvm.amdgcn.raw.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 4, i32 0)
- ret void
-}
-
-define void @raw_buffer_atomic_cond_sub_imm_soff_no_return_forced(<4 x i32> inreg %rsrc, i32 inreg %data) #1 {
-; GFX12-LABEL: raw_buffer_atomic_cond_sub_imm_soff_no_return_forced:
-; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v0, s4
-; GFX12-NEXT: s_mov_b32 s4, 4
-; GFX12-NEXT: buffer_atomic_cond_sub_u32 v0, off, s[0:3], s4
-; GFX12-NEXT: s_setpc_b64 s[30:31]
-main_body:
- %unused = call i32 @llvm.amdgcn.raw.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 4, i32 0)
- ret void
-}
-
-define float @struct_buffer_atomic_cond_sub_return(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
-; GFX12-LABEL: struct_buffer_atomic_cond_sub_return:
-; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s4
-; GFX12-NEXT: buffer_atomic_cond_sub_u32 v0, v1, s[0:3], null idxen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
-main_body:
- %orig = call i32 @llvm.amdgcn.struct.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
- %r = bitcast i32 %orig to float
- ret float %r
-}
-
-define void @struct_buffer_atomic_cond_sub_no_return(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
-; GFX12-LABEL: struct_buffer_atomic_cond_sub_no_return:
-; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
-; GFX12-NEXT: buffer_atomic_cond_sub_u32 v1, v0, s[0:3], null idxen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
-main_body:
- %unused = call i32 @llvm.amdgcn.struct.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
- ret void
-}
-
-define void @struct_buffer_atomic_cond_sub_no_return_forced(<4 x i32> inreg %rsrc, i32 inreg %data) #1 {
-; GFX12-LABEL: struct_buffer_atomic_cond_sub_no_return_forced:
-; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
-; GFX12-NEXT: buffer_atomic_cond_sub_u32 v1, v0, s[0:3], null idxen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
-main_body:
- %unused = call i32 @llvm.amdgcn.struct.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
- ret void
-}
-
-define float @struct_buffer_atomic_cond_sub_imm_soff_return(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
-; GFX12-LABEL: struct_buffer_atomic_cond_sub_imm_soff_return:
-; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s4
-; GFX12-NEXT: s_mov_b32 s4, 4
-; GFX12-NEXT: buffer_atomic_cond_sub_u32 v0, v1, s[0:3], s4 idxen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
-main_body:
- %orig = call i32 @llvm.amdgcn.struct.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 4, i32 0)
- %r = bitcast i32 %orig to float
- ret float %r
-}
-
-define void @struct_buffer_atomic_cond_sub_imm_soff_no_return(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
-; GFX12-LABEL: struct_buffer_atomic_cond_sub_imm_soff_no_return:
-; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
-; GFX12-NEXT: s_mov_b32 s4, 4
-; GFX12-NEXT: buffer_atomic_cond_sub_u32 v1, v0, s[0:3], s4 idxen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
-main_body:
- %unused = call i32 @llvm.amdgcn.struct.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 4, i32 0)
- ret void
-}
-
-define void @struct_buffer_atomic_cond_sub_imm_soff_no_return_forced(<4 x i32> inreg %rsrc, i32 inreg %data) #1 {
-; GFX12-LABEL: struct_buffer_atomic_cond_sub_imm_soff_no_return_forced:
-; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
-; GFX12-NEXT: s_mov_b32 s4, 4
-; GFX12-NEXT: buffer_atomic_cond_sub_u32 v1, v0, s[0:3], s4 idxen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
-main_body:
- %unused = call i32 @llvm.amdgcn.struct.buffer.atomic.cond.sub.u32.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 4, i32 0)
- ret void
-}
-
-declare i32 @llvm.amdgcn.raw.buffer.atomic.cond.sub.u32.i32(i32, <4 x i32>, i32, i32, i32) #0
-declare i32 @llvm.amdgcn.struct.buffer.atomic.cond.sub.u32.i32(i32, <4 x i32>, i32, i32, i32, i32) #0
-
-attributes #0 = { nounwind }
-attributes #1 = { nounwind "target-features"="+atomic-csub-no-rtn-insts" }
-
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.csub.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.csub.ll
index d7dd0ce58a08f..93ae2b6391d81 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.csub.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.csub.ll
@@ -2,14 +2,12 @@
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1031 -verify-machineinstrs | FileCheck %s -check-prefixes=GCN,PREGFX12
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs | FileCheck %s -check-prefixes=GCN,GFX12PLUS
-declare i32 @llvm.amdgcn.global.atomic.csub(ptr addrspace(1), i32)
-
; GCN-LABEL: {{^}}global_atomic_csub_rtn:
; PREGFX12: global_atomic_csub v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9:]+}}, s{{\[[0-9]+:[0-9]+\]}} glc
; GFX12PLUS: global_atomic_sub_clamp_u32 v0, v0, v1, s[0:1] th:TH_ATOMIC_RETURN
define amdgpu_kernel void @global_atomic_csub_rtn(ptr addrspace(1) %ptr, i32 %data) {
main_body:
- %ret = call i32 @llvm.amdgcn.global.atomic.csub(ptr addrspace(1) %ptr, i32 %data)
+ %ret = atomicrmw sub_clamp ptr addrspace(1) %ptr, i32 %data seq_cst
ret void
}
@@ -18,7 +16,7 @@ main_body:
; GFX12PLUS: global_atomic_sub_clamp_u32 v0, v1, s[0:1]
define amdgpu_kernel void @global_atomic_csub_no_rtn(ptr addrspace(1) %ptr, i32 %data) #0 {
main_body:
- %ret = call i32 @llvm.amdgcn.global.atomic.csub(ptr addrspace(1) %ptr, i32 %data)
+ %ret = atomicrmw sub_clamp ptr addrspace(1) %ptr, i32 %data seq_cst
ret void
}
@@ -28,7 +26,7 @@ main_body:
define amdgpu_kernel void @global_atomic_csub_off4_rtn(ptr addrspace(1) %ptr, i32 %data) {
main_body:
%p = getelementptr i32, ptr addrspace(1) %ptr, i64 1
- %ret = call i32 @llvm.amdgcn.global.atomic.csub(ptr addrspace(1) %p, i32 %data)
+ %ret = atomicrmw sub_clamp ptr addrspace(1) %p, i32 %data seq_cst
ret void
}
@@ -38,7 +36,7 @@ main_body:
define amdgpu_kernel void @global_atomic_csub_off4_no_rtn(ptr addrspace(1) %ptr, i32 %data) #0 {
main_body:
%p = getelementptr i32, ptr addrspace(1) %ptr, i64 1
- %ret = call i32 @llvm.amdgcn.global.atomic.csub(ptr addrspace(1) %p, i32 %data)
+ %ret = atomicrmw sub_clamp ptr addrspace(1) %p, i32 %data seq_cst
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
index 6fdc0d5834ef6..3f0b6d08e36e8 100644
--- a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
@@ -632,3 +632,55 @@ define i32 @atomicrmw_dec_private_i32(ptr addrspace(5) %ptr) {
%result = atomicrmw udec_wrap ptr addrspace(5) %ptr, i32 4 seq_cst
ret i32 %result
}
+
+define i32 @atomicrmw_cond_sub_private_i32(ptr addrspace(5) %ptr) {
+; IR-LABEL: define i32 @atomicrmw_cond_sub_private_i32(
+; IR-SAME: ptr addrspace(5) [[PTR:%.*]]) #[[ATTR0]] {
+; IR-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(5) [[PTR]], align 4
+; IR-NEXT: [[TMP2:%.*]] = icmp uge i32 [[TMP1]], 4
+; IR-NEXT: [[TMP3:%.*]] = sub i32 [[TMP1]], 4
+; IR-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 4
+; IR-NEXT: store i32 [[NEW]], ptr addrspace(5) [[PTR]], align 4
+; IR-NEXT: ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_cond_sub_private_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_add_i32_e32 v2, vcc, -4, v1
+; GCN-NEXT: v_cmp_lt_u32_e32 vcc, 3, v1
+; GCN-NEXT: v_cndmask_b32_e32 v2, 4, v2, vcc
+; GCN-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT: v_mov_b32_e32 v0, v1
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw cond_sub ptr addrspace(5) %ptr, i32 4 seq_cst
+ ret i32 %result
+}
+
+define i32 @atomicrmw_sub_clamp_private_i32(ptr addrspace(5) %ptr) {
+; IR-LABEL: define i32 @atomicrmw_sub_clamp_private_i32(
+; IR-SAME: ptr addrspace(5) [[PTR:%.*]]) #[[ATTR0]] {
+; IR-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(5) [[PTR]], align 4
+; IR-NEXT: [[TMP2:%.*]] = icmp uge i32 [[TMP1]], 4
+; IR-NEXT: [[TMP3:%.*]] = sub i32 [[TMP1]], 4
+; IR-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 0
+; IR-NEXT: store i32 [[NEW]], ptr addrspace(5) [[PTR]], align 4
+; IR-NEXT: ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_sub_clamp_private_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_add_i32_e32 v2, vcc, -4, v1
+; GCN-NEXT: v_cmp_lt_u32_e32 vcc, 3, v1
+; GCN-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GCN-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT: v_mov_b32_e32 v0, v1
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %result = atomicrmw sub_clamp ptr addrspace(5) %ptr, i32 4 seq_cst
+ ret i32 %result
+}
diff --git a/llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll b/llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll
index c04cb89e9527b..f948d4bd3280a 100644
--- a/llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll
@@ -12,7 +12,7 @@ define i32 @shl_base_atomicrmw_global_atomic_csub_ptr(ptr addrspace(1) %out, ptr
%cast = ptrtoint ptr addrspace(1) %arrayidx0 to i64
%shl = shl i64 %cast, 2
%castback = inttoptr i64 %shl to ptr addrspace(1)
- %val = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %castback, i32 43)
+ %val = atomicrmw sub_clamp ptr addrspace(1) %castback, i32 43 seq_cst
store volatile i64 %cast, ptr addrspace(1) %extra.use, align 4
ret i32 %val
}
diff --git a/llvm/test/CodeGen/ARM/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/ARM/atomicrmw-cond-sub-clamp.ll
new file mode 100644
index 0000000000000..3364b2050dcc6
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/atomicrmw-cond-sub-clamp.ll
@@ -0,0 +1,186 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 < %s | FileCheck %s
+
+define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: .LBB0_1: @ %atomicrmw.start
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldrexb r12, [r0]
+; CHECK-NEXT: uxtb r3, r1
+; CHECK-NEXT: cmp r12, r3
+; CHECK-NEXT: mov r3, r1
+; CHECK-NEXT: subhs r3, r12, r3
+; CHECK-NEXT: strexb r2, r3, [r0]
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: bne .LBB0_1
+; CHECK-NEXT: @ %bb.2: @ %atomicrmw.end
+; CHECK-NEXT: mov r0, r12
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: bx lr
+ %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: .LBB1_1: @ %atomicrmw.start
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldrexh r12, [r0]
+; CHECK-NEXT: uxth r3, r1
+; CHECK-NEXT: cmp r12, r3
+; CHECK-NEXT: mov r3, r1
+; CHECK-NEXT: subhs r3, r12, r3
+; CHECK-NEXT: strexh r2, r3, [r0]
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: bne .LBB1_1
+; CHECK-NEXT: @ %bb.2: @ %atomicrmw.end
+; CHECK-NEXT: mov r0, r12
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: bx lr
+ %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: .LBB2_1: @ %atomicrmw.start
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldrex r12, [r0]
+; CHECK-NEXT: subs r3, r12, r1
+; CHECK-NEXT: movlo r3, r1
+; CHECK-NEXT: strex r2, r3, [r0]
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: bne .LBB2_1
+; CHECK-NEXT: @ %bb.2: @ %atomicrmw.end
+; CHECK-NEXT: mov r0, r12
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: bx lr
+ %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: .LBB3_1: @ %atomicrmw.start
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldrexd r4, r5, [r0]
+; CHECK-NEXT: mov r1, #0
+; CHECK-NEXT: subs r6, r4, r2
+; CHECK-NEXT: sbcs r7, r5, r3
+; CHECK-NEXT: movwhs r1, #1
+; CHECK-NEXT: cmp r1, #0
+; CHECK-NEXT: moveq r7, r3
+; CHECK-NEXT: moveq r6, r2
+; CHECK-NEXT: strexd r1, r6, r7, [r0]
+; CHECK-NEXT: cmp r1, #0
+; CHECK-NEXT: bne .LBB3_1
+; CHECK-NEXT: @ %bb.2: @ %atomicrmw.end
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: mov r1, r5
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: pop {r4, r5, r6, r7, r11, pc}
+ %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
+
+define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: .LBB4_1: @ %atomicrmw.start
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldrexb r12, [r0]
+; CHECK-NEXT: uxtb r3, r1
+; CHECK-NEXT: cmp r12, r3
+; CHECK-NEXT: mov r3, #0
+; CHECK-NEXT: subhs r3, r12, r1
+; CHECK-NEXT: strexb r2, r3, [r0]
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: bne .LBB4_1
+; CHECK-NEXT: @ %bb.2: @ %atomicrmw.end
+; CHECK-NEXT: mov r0, r12
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: bx lr
+ %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: .LBB5_1: @ %atomicrmw.start
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldrexh r12, [r0]
+; CHECK-NEXT: uxth r3, r1
+; CHECK-NEXT: cmp r12, r3
+; CHECK-NEXT: mov r3, #0
+; CHECK-NEXT: subhs r3, r12, r1
+; CHECK-NEXT: strexh r2, r3, [r0]
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: bne .LBB5_1
+; CHECK-NEXT: @ %bb.2: @ %atomicrmw.end
+; CHECK-NEXT: mov r0, r12
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: bx lr
+ %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: .LBB6_1: @ %atomicrmw.start
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldrex r12, [r0]
+; CHECK-NEXT: subs r3, r12, r1
+; CHECK-NEXT: movlo r3, #0
+; CHECK-NEXT: strex r2, r3, [r0]
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: bne .LBB6_1
+; CHECK-NEXT: @ %bb.2: @ %atomicrmw.end
+; CHECK-NEXT: mov r0, r12
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: bx lr
+ %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: .LBB7_1: @ %atomicrmw.start
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldrexd r4, r5, [r0]
+; CHECK-NEXT: subs r1, r4, r2
+; CHECK-NEXT: sbcs r7, r5, r3
+; CHECK-NEXT: mov r6, #0
+; CHECK-NEXT: movwhs r6, #1
+; CHECK-NEXT: cmp r6, #0
+; CHECK-NEXT: moveq r7, r6
+; CHECK-NEXT: movne r6, r1
+; CHECK-NEXT: strexd r1, r6, r7, [r0]
+; CHECK-NEXT: cmp r1, #0
+; CHECK-NEXT: bne .LBB7_1
+; CHECK-NEXT: @ %bb.2: @ %atomicrmw.end
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: mov r1, r5
+; CHECK-NEXT: dmb ish
+; CHECK-NEXT: pop {r4, r5, r6, r7, r11, pc}
+ %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
diff --git a/llvm/test/CodeGen/Hexagon/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/Hexagon/atomicrmw-cond-sub-clamp.ll
new file mode 100644
index 0000000000000..23d5d2f8a72e8
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/atomicrmw-cond-sub-clamp.ll
@@ -0,0 +1,355 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i8:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = and(#24,asl(r0,#3))
+; CHECK-NEXT: r3 = and(r0,#-4)
+; CHECK-NEXT: r2 = #255
+; CHECK-NEXT: r4 = and(r1,#255)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r2 = asl(r2,r0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r5 = sub(#-1,r2)
+; CHECK-NEXT: }
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB0_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: {
+; CHECK-NEXT: r6 = memw_locked(r3)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r2 = lsr(r6,r0)
+; CHECK-NEXT: r6 = and(r6,r5)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r7 = and(r2,#255)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = cmp.gtu(r4,r7)
+; CHECK-NEXT: if (p0.new) r7 = add(r1,#0)
+; CHECK-NEXT: if (!p0.new) r7 = sub(r2,r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r7 = and(r7,#255)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r6 |= asl(r7,r0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: memw_locked(r3,p0) = r6
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: if (!p0) jump:nt .LBB0_1
+; CHECK-NEXT: }
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = r2
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
+ %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i16:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = and(#24,asl(r0,#3))
+; CHECK-NEXT: r3 = and(r0,#-4)
+; CHECK-NEXT: r2 = ##65535
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r2 = asl(r2,r0)
+; CHECK-NEXT: r4 = zxth(r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r5 = sub(#-1,r2)
+; CHECK-NEXT: }
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB1_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: {
+; CHECK-NEXT: r6 = memw_locked(r3)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r2 = lsr(r6,r0)
+; CHECK-NEXT: r6 = and(r6,r5)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r7 = zxth(r2)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = cmp.gtu(r4,r7)
+; CHECK-NEXT: if (p0.new) r7 = add(r1,#0)
+; CHECK-NEXT: if (!p0.new) r7 = sub(r2,r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r7 = zxth(r7)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r6 |= asl(r7,r0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: memw_locked(r3,p0) = r6
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: if (!p0) jump:nt .LBB1_1
+; CHECK-NEXT: }
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = r2
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
+ %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i32:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB2_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: {
+; CHECK-NEXT: r2 = memw_locked(r0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = cmp.gtu(r1,r2)
+; CHECK-NEXT: if (p0.new) r3 = add(r1,#0)
+; CHECK-NEXT: if (!p0.new) r3 = sub(r2,r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: memw_locked(r0,p0) = r3
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: if (!p0) jump:nt .LBB2_1
+; CHECK-NEXT: }
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = r2
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
+ %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i64:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB3_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: {
+; CHECK-NEXT: r5:4 = memd_locked(r0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r7:6 = sub(r5:4,r3:2)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = cmp.gtu(r3:2,r5:4)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r8 = mux(p0,r2,r6)
+; CHECK-NEXT: r9 = mux(p0,r3,r7)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: memd_locked(r0,p0) = r9:8
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: if (!p0) jump:nt .LBB3_1
+; CHECK-NEXT: }
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: {
+; CHECK-NEXT: r1:0 = combine(r5,r4)
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
+ %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
+
+define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i8:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = and(#24,asl(r0,#3))
+; CHECK-NEXT: r3 = and(r0,#-4)
+; CHECK-NEXT: r2 = #255
+; CHECK-NEXT: r4 = and(r1,#255)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r2 = asl(r2,r0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r5 = sub(#-1,r2)
+; CHECK-NEXT: }
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB4_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: {
+; CHECK-NEXT: r6 = memw_locked(r3)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r2 = lsr(r6,r0)
+; CHECK-NEXT: r6 = and(r6,r5)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r7 = and(r2,#255)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = cmp.gtu(r4,r7)
+; CHECK-NEXT: if (p0.new) r7 = #0
+; CHECK-NEXT: if (!p0.new) r7 = sub(r2,r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r7 = and(r7,#255)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r6 |= asl(r7,r0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: memw_locked(r3,p0) = r6
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: if (!p0) jump:nt .LBB4_1
+; CHECK-NEXT: }
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = r2
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
+ %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i16:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = and(#24,asl(r0,#3))
+; CHECK-NEXT: r3 = and(r0,#-4)
+; CHECK-NEXT: r2 = ##65535
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r2 = asl(r2,r0)
+; CHECK-NEXT: r4 = zxth(r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r5 = sub(#-1,r2)
+; CHECK-NEXT: }
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB5_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: {
+; CHECK-NEXT: r6 = memw_locked(r3)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r2 = lsr(r6,r0)
+; CHECK-NEXT: r6 = and(r6,r5)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r7 = zxth(r2)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = cmp.gtu(r4,r7)
+; CHECK-NEXT: if (p0.new) r7 = #0
+; CHECK-NEXT: if (!p0.new) r7 = sub(r2,r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r7 = zxth(r7)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r6 |= asl(r7,r0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: memw_locked(r3,p0) = r6
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: if (!p0) jump:nt .LBB5_1
+; CHECK-NEXT: }
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = r2
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
+ %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i32:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB6_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: {
+; CHECK-NEXT: r2 = memw_locked(r0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = cmp.gtu(r1,r2)
+; CHECK-NEXT: if (p0.new) r3 = #0
+; CHECK-NEXT: if (!p0.new) r3 = sub(r2,r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: memw_locked(r0,p0) = r3
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: if (!p0) jump:nt .LBB6_1
+; CHECK-NEXT: }
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = r2
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
+ %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i64:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: {
+; CHECK-NEXT: r1 = #0
+; CHECK-NEXT: }
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB7_1: // %atomicrmw.start
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: {
+; CHECK-NEXT: r5:4 = memd_locked(r0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r7:6 = sub(r5:4,r3:2)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = cmp.gtu(r3:2,r5:4)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r8 = mux(p0,r1,r6)
+; CHECK-NEXT: r9 = mux(p0,r1,r7)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: memd_locked(r0,p0) = r9:8
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: if (!p0) jump:nt .LBB7_1
+; CHECK-NEXT: }
+; CHECK-NEXT: // %bb.2: // %atomicrmw.end
+; CHECK-NEXT: {
+; CHECK-NEXT: r1:0 = combine(r5,r4)
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
+ %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
diff --git a/llvm/test/CodeGen/LoongArch/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/LoongArch/atomicrmw-cond-sub-clamp.ll
new file mode 100644
index 0000000000000..154e057c2dc57
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/atomicrmw-cond-sub-clamp.ll
@@ -0,0 +1,362 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck --check-prefix=LA64 %s
+
+define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
+; LA64-LABEL: atomicrmw_cond_sub_i8:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a4, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a2, $a4, 24
+; LA64-NEXT: ori $a5, $zero, 255
+; LA64-NEXT: ld.w $a3, $a0, 0
+; LA64-NEXT: sll.w $a4, $a5, $a4
+; LA64-NEXT: nor $a4, $a4, $zero
+; LA64-NEXT: andi $a5, $a1, 255
+; LA64-NEXT: .p2align 4, , 16
+; LA64-NEXT: .LBB0_1: # %atomicrmw.start
+; LA64-NEXT: # =>This Loop Header: Depth=1
+; LA64-NEXT: # Child Loop BB0_3 Depth 2
+; LA64-NEXT: srl.w $a6, $a3, $a2
+; LA64-NEXT: addi.w $a7, $a3, 0
+; LA64-NEXT: andi $t0, $a6, 255
+; LA64-NEXT: sltu $t0, $t0, $a5
+; LA64-NEXT: xori $t0, $t0, 1
+; LA64-NEXT: sub.d $a6, $a6, $a1
+; LA64-NEXT: maskeqz $a6, $a6, $t0
+; LA64-NEXT: masknez $t0, $a1, $t0
+; LA64-NEXT: or $a6, $a6, $t0
+; LA64-NEXT: andi $a6, $a6, 255
+; LA64-NEXT: sll.w $a6, $a6, $a2
+; LA64-NEXT: and $a3, $a3, $a4
+; LA64-NEXT: or $a6, $a3, $a6
+; LA64-NEXT: .LBB0_3: # %atomicrmw.start
+; LA64-NEXT: # Parent Loop BB0_1 Depth=1
+; LA64-NEXT: # => This Inner Loop Header: Depth=2
+; LA64-NEXT: ll.w $a3, $a0, 0
+; LA64-NEXT: bne $a3, $a7, .LBB0_5
+; LA64-NEXT: # %bb.4: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB0_3 Depth=2
+; LA64-NEXT: move $t0, $a6
+; LA64-NEXT: sc.w $t0, $a0, 0
+; LA64-NEXT: beqz $t0, .LBB0_3
+; LA64-NEXT: b .LBB0_6
+; LA64-NEXT: .LBB0_5: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB0_1 Depth=1
+; LA64-NEXT: dbar 20
+; LA64-NEXT: .LBB0_6: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB0_1 Depth=1
+; LA64-NEXT: bne $a3, $a7, .LBB0_1
+; LA64-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+ %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
+; LA64-LABEL: atomicrmw_cond_sub_i16:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a4, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a2, $a4, 24
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a5, $a3, 4095
+; LA64-NEXT: ld.w $a3, $a0, 0
+; LA64-NEXT: sll.w $a4, $a5, $a4
+; LA64-NEXT: nor $a4, $a4, $zero
+; LA64-NEXT: bstrpick.d $a5, $a1, 15, 0
+; LA64-NEXT: .p2align 4, , 16
+; LA64-NEXT: .LBB1_1: # %atomicrmw.start
+; LA64-NEXT: # =>This Loop Header: Depth=1
+; LA64-NEXT: # Child Loop BB1_3 Depth 2
+; LA64-NEXT: srl.w $a6, $a3, $a2
+; LA64-NEXT: addi.w $a7, $a3, 0
+; LA64-NEXT: bstrpick.d $t0, $a6, 15, 0
+; LA64-NEXT: sltu $t0, $t0, $a5
+; LA64-NEXT: xori $t0, $t0, 1
+; LA64-NEXT: sub.d $a6, $a6, $a1
+; LA64-NEXT: maskeqz $a6, $a6, $t0
+; LA64-NEXT: masknez $t0, $a1, $t0
+; LA64-NEXT: or $a6, $a6, $t0
+; LA64-NEXT: bstrpick.d $a6, $a6, 15, 0
+; LA64-NEXT: sll.w $a6, $a6, $a2
+; LA64-NEXT: and $a3, $a3, $a4
+; LA64-NEXT: or $a6, $a3, $a6
+; LA64-NEXT: .LBB1_3: # %atomicrmw.start
+; LA64-NEXT: # Parent Loop BB1_1 Depth=1
+; LA64-NEXT: # => This Inner Loop Header: Depth=2
+; LA64-NEXT: ll.w $a3, $a0, 0
+; LA64-NEXT: bne $a3, $a7, .LBB1_5
+; LA64-NEXT: # %bb.4: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB1_3 Depth=2
+; LA64-NEXT: move $t0, $a6
+; LA64-NEXT: sc.w $t0, $a0, 0
+; LA64-NEXT: beqz $t0, .LBB1_3
+; LA64-NEXT: b .LBB1_6
+; LA64-NEXT: .LBB1_5: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB1_1 Depth=1
+; LA64-NEXT: dbar 20
+; LA64-NEXT: .LBB1_6: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB1_1 Depth=1
+; LA64-NEXT: bne $a3, $a7, .LBB1_1
+; LA64-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+ %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
+; LA64-LABEL: atomicrmw_cond_sub_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.w $a2, $a0, 0
+; LA64-NEXT: addi.w $a3, $a1, 0
+; LA64-NEXT: .p2align 4, , 16
+; LA64-NEXT: .LBB2_1: # %atomicrmw.start
+; LA64-NEXT: # =>This Loop Header: Depth=1
+; LA64-NEXT: # Child Loop BB2_3 Depth 2
+; LA64-NEXT: addi.w $a4, $a2, 0
+; LA64-NEXT: sltu $a5, $a4, $a3
+; LA64-NEXT: xori $a5, $a5, 1
+; LA64-NEXT: sub.d $a2, $a2, $a1
+; LA64-NEXT: maskeqz $a2, $a2, $a5
+; LA64-NEXT: masknez $a5, $a1, $a5
+; LA64-NEXT: or $a5, $a2, $a5
+; LA64-NEXT: .LBB2_3: # %atomicrmw.start
+; LA64-NEXT: # Parent Loop BB2_1 Depth=1
+; LA64-NEXT: # => This Inner Loop Header: Depth=2
+; LA64-NEXT: ll.w $a2, $a0, 0
+; LA64-NEXT: bne $a2, $a4, .LBB2_5
+; LA64-NEXT: # %bb.4: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB2_3 Depth=2
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB2_3
+; LA64-NEXT: b .LBB2_6
+; LA64-NEXT: .LBB2_5: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB2_1 Depth=1
+; LA64-NEXT: dbar 20
+; LA64-NEXT: .LBB2_6: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB2_1 Depth=1
+; LA64-NEXT: bne $a2, $a4, .LBB2_1
+; LA64-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-NEXT: move $a0, $a2
+; LA64-NEXT: ret
+ %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
+; LA64-LABEL: atomicrmw_cond_sub_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.d $a2, $a0, 0
+; LA64-NEXT: .p2align 4, , 16
+; LA64-NEXT: .LBB3_1: # %atomicrmw.start
+; LA64-NEXT: # =>This Loop Header: Depth=1
+; LA64-NEXT: # Child Loop BB3_3 Depth 2
+; LA64-NEXT: move $a3, $a2
+; LA64-NEXT: sltu $a2, $a2, $a1
+; LA64-NEXT: xori $a2, $a2, 1
+; LA64-NEXT: sub.d $a4, $a3, $a1
+; LA64-NEXT: maskeqz $a4, $a4, $a2
+; LA64-NEXT: masknez $a2, $a1, $a2
+; LA64-NEXT: or $a4, $a4, $a2
+; LA64-NEXT: .LBB3_3: # %atomicrmw.start
+; LA64-NEXT: # Parent Loop BB3_1 Depth=1
+; LA64-NEXT: # => This Inner Loop Header: Depth=2
+; LA64-NEXT: ll.d $a2, $a0, 0
+; LA64-NEXT: bne $a2, $a3, .LBB3_5
+; LA64-NEXT: # %bb.4: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB3_3 Depth=2
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: sc.d $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB3_3
+; LA64-NEXT: b .LBB3_6
+; LA64-NEXT: .LBB3_5: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB3_1 Depth=1
+; LA64-NEXT: dbar 20
+; LA64-NEXT: .LBB3_6: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB3_1 Depth=1
+; LA64-NEXT: bne $a2, $a3, .LBB3_1
+; LA64-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-NEXT: move $a0, $a2
+; LA64-NEXT: ret
+ %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
+
+define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
+; LA64-LABEL: atomicrmw_sub_clamp_i8:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a4, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a2, $a4, 24
+; LA64-NEXT: ori $a5, $zero, 255
+; LA64-NEXT: ld.w $a3, $a0, 0
+; LA64-NEXT: sll.w $a4, $a5, $a4
+; LA64-NEXT: nor $a4, $a4, $zero
+; LA64-NEXT: andi $a5, $a1, 255
+; LA64-NEXT: .p2align 4, , 16
+; LA64-NEXT: .LBB4_1: # %atomicrmw.start
+; LA64-NEXT: # =>This Loop Header: Depth=1
+; LA64-NEXT: # Child Loop BB4_3 Depth 2
+; LA64-NEXT: srl.w $a6, $a3, $a2
+; LA64-NEXT: addi.w $a7, $a3, 0
+; LA64-NEXT: andi $t0, $a6, 255
+; LA64-NEXT: sltu $t0, $t0, $a5
+; LA64-NEXT: xori $t0, $t0, 1
+; LA64-NEXT: sub.d $a6, $a6, $a1
+; LA64-NEXT: maskeqz $a6, $a6, $t0
+; LA64-NEXT: andi $a6, $a6, 255
+; LA64-NEXT: sll.w $a6, $a6, $a2
+; LA64-NEXT: and $a3, $a3, $a4
+; LA64-NEXT: or $a6, $a3, $a6
+; LA64-NEXT: .LBB4_3: # %atomicrmw.start
+; LA64-NEXT: # Parent Loop BB4_1 Depth=1
+; LA64-NEXT: # => This Inner Loop Header: Depth=2
+; LA64-NEXT: ll.w $a3, $a0, 0
+; LA64-NEXT: bne $a3, $a7, .LBB4_5
+; LA64-NEXT: # %bb.4: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB4_3 Depth=2
+; LA64-NEXT: move $t0, $a6
+; LA64-NEXT: sc.w $t0, $a0, 0
+; LA64-NEXT: beqz $t0, .LBB4_3
+; LA64-NEXT: b .LBB4_6
+; LA64-NEXT: .LBB4_5: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB4_1 Depth=1
+; LA64-NEXT: dbar 20
+; LA64-NEXT: .LBB4_6: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB4_1 Depth=1
+; LA64-NEXT: bne $a3, $a7, .LBB4_1
+; LA64-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+ %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
+; LA64-LABEL: atomicrmw_sub_clamp_i16:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a4, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a2, $a4, 24
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a5, $a3, 4095
+; LA64-NEXT: ld.w $a3, $a0, 0
+; LA64-NEXT: sll.w $a4, $a5, $a4
+; LA64-NEXT: nor $a4, $a4, $zero
+; LA64-NEXT: bstrpick.d $a5, $a1, 15, 0
+; LA64-NEXT: .p2align 4, , 16
+; LA64-NEXT: .LBB5_1: # %atomicrmw.start
+; LA64-NEXT: # =>This Loop Header: Depth=1
+; LA64-NEXT: # Child Loop BB5_3 Depth 2
+; LA64-NEXT: srl.w $a6, $a3, $a2
+; LA64-NEXT: addi.w $a7, $a3, 0
+; LA64-NEXT: bstrpick.d $t0, $a6, 15, 0
+; LA64-NEXT: sltu $t0, $t0, $a5
+; LA64-NEXT: xori $t0, $t0, 1
+; LA64-NEXT: sub.d $a6, $a6, $a1
+; LA64-NEXT: maskeqz $a6, $a6, $t0
+; LA64-NEXT: bstrpick.d $a6, $a6, 15, 0
+; LA64-NEXT: sll.w $a6, $a6, $a2
+; LA64-NEXT: and $a3, $a3, $a4
+; LA64-NEXT: or $a6, $a3, $a6
+; LA64-NEXT: .LBB5_3: # %atomicrmw.start
+; LA64-NEXT: # Parent Loop BB5_1 Depth=1
+; LA64-NEXT: # => This Inner Loop Header: Depth=2
+; LA64-NEXT: ll.w $a3, $a0, 0
+; LA64-NEXT: bne $a3, $a7, .LBB5_5
+; LA64-NEXT: # %bb.4: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB5_3 Depth=2
+; LA64-NEXT: move $t0, $a6
+; LA64-NEXT: sc.w $t0, $a0, 0
+; LA64-NEXT: beqz $t0, .LBB5_3
+; LA64-NEXT: b .LBB5_6
+; LA64-NEXT: .LBB5_5: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB5_1 Depth=1
+; LA64-NEXT: dbar 20
+; LA64-NEXT: .LBB5_6: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB5_1 Depth=1
+; LA64-NEXT: bne $a3, $a7, .LBB5_1
+; LA64-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+ %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
+; LA64-LABEL: atomicrmw_sub_clamp_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.w $a2, $a0, 0
+; LA64-NEXT: addi.w $a3, $a1, 0
+; LA64-NEXT: .p2align 4, , 16
+; LA64-NEXT: .LBB6_1: # %atomicrmw.start
+; LA64-NEXT: # =>This Loop Header: Depth=1
+; LA64-NEXT: # Child Loop BB6_3 Depth 2
+; LA64-NEXT: addi.w $a4, $a2, 0
+; LA64-NEXT: sltu $a5, $a4, $a3
+; LA64-NEXT: xori $a5, $a5, 1
+; LA64-NEXT: sub.d $a2, $a2, $a1
+; LA64-NEXT: maskeqz $a5, $a2, $a5
+; LA64-NEXT: .LBB6_3: # %atomicrmw.start
+; LA64-NEXT: # Parent Loop BB6_1 Depth=1
+; LA64-NEXT: # => This Inner Loop Header: Depth=2
+; LA64-NEXT: ll.w $a2, $a0, 0
+; LA64-NEXT: bne $a2, $a4, .LBB6_5
+; LA64-NEXT: # %bb.4: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB6_3 Depth=2
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB6_3
+; LA64-NEXT: b .LBB6_6
+; LA64-NEXT: .LBB6_5: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB6_1 Depth=1
+; LA64-NEXT: dbar 20
+; LA64-NEXT: .LBB6_6: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB6_1 Depth=1
+; LA64-NEXT: bne $a2, $a4, .LBB6_1
+; LA64-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-NEXT: move $a0, $a2
+; LA64-NEXT: ret
+ %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
+; LA64-LABEL: atomicrmw_sub_clamp_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.d $a2, $a0, 0
+; LA64-NEXT: .p2align 4, , 16
+; LA64-NEXT: .LBB7_1: # %atomicrmw.start
+; LA64-NEXT: # =>This Loop Header: Depth=1
+; LA64-NEXT: # Child Loop BB7_3 Depth 2
+; LA64-NEXT: move $a3, $a2
+; LA64-NEXT: sltu $a2, $a2, $a1
+; LA64-NEXT: xori $a2, $a2, 1
+; LA64-NEXT: sub.d $a4, $a3, $a1
+; LA64-NEXT: maskeqz $a4, $a4, $a2
+; LA64-NEXT: .LBB7_3: # %atomicrmw.start
+; LA64-NEXT: # Parent Loop BB7_1 Depth=1
+; LA64-NEXT: # => This Inner Loop Header: Depth=2
+; LA64-NEXT: ll.d $a2, $a0, 0
+; LA64-NEXT: bne $a2, $a3, .LBB7_5
+; LA64-NEXT: # %bb.4: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB7_3 Depth=2
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: sc.d $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB7_3
+; LA64-NEXT: b .LBB7_6
+; LA64-NEXT: .LBB7_5: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB7_1 Depth=1
+; LA64-NEXT: dbar 20
+; LA64-NEXT: .LBB7_6: # %atomicrmw.start
+; LA64-NEXT: # in Loop: Header=BB7_1 Depth=1
+; LA64-NEXT: bne $a2, $a3, .LBB7_1
+; LA64-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-NEXT: move $a0, $a2
+; LA64-NEXT: ret
+ %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
diff --git a/llvm/test/CodeGen/PowerPC/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/PowerPC/atomicrmw-cond-sub-clamp.ll
new file mode 100644
index 0000000000000..4ed8ff788597b
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/atomicrmw-cond-sub-clamp.ll
@@ -0,0 +1,396 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s
+
+define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sync
+; CHECK-NEXT: mr 5, 3
+; CHECK-NEXT: rlwinm 7, 5, 3, 27, 28
+; CHECK-NEXT: lbz 3, 0(3)
+; CHECK-NEXT: xori 7, 7, 24
+; CHECK-NEXT: li 8, 255
+; CHECK-NEXT: clrlwi 6, 4, 24
+; CHECK-NEXT: rldicr 5, 5, 0, 61
+; CHECK-NEXT: slw 8, 8, 7
+; CHECK-NEXT: b .LBB0_2
+; CHECK-NEXT: .LBB0_1: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: srw 3, 11, 7
+; CHECK-NEXT: cmplw 3, 9
+; CHECK-NEXT: beq 0, .LBB0_8
+; CHECK-NEXT: .LBB0_2: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB0_6 Depth 2
+; CHECK-NEXT: clrlwi 9, 3, 24
+; CHECK-NEXT: cmplw 9, 6
+; CHECK-NEXT: bge 0, .LBB0_4
+; CHECK-NEXT: # %bb.3: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: b .LBB0_5
+; CHECK-NEXT: .LBB0_4:
+; CHECK-NEXT: sub 3, 3, 4
+; CHECK-NEXT: .LBB0_5: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: slw 3, 3, 7
+; CHECK-NEXT: slw 10, 9, 7
+; CHECK-NEXT: and 3, 3, 8
+; CHECK-NEXT: and 10, 10, 8
+; CHECK-NEXT: .LBB0_6: # %atomicrmw.start
+; CHECK-NEXT: # Parent Loop BB0_2 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: lwarx 12, 0, 5
+; CHECK-NEXT: and 11, 12, 8
+; CHECK-NEXT: cmpw 11, 10
+; CHECK-NEXT: bne 0, .LBB0_1
+; CHECK-NEXT: # %bb.7: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: andc 12, 12, 8
+; CHECK-NEXT: or 12, 12, 3
+; CHECK-NEXT: stwcx. 12, 0, 5
+; CHECK-NEXT: bne 0, .LBB0_6
+; CHECK-NEXT: b .LBB0_1
+; CHECK-NEXT: .LBB0_8: # %atomicrmw.end
+; CHECK-NEXT: lwsync
+; CHECK-NEXT: blr
+ %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sync
+; CHECK-NEXT: mr 5, 3
+; CHECK-NEXT: li 8, 0
+; CHECK-NEXT: lhz 3, 0(3)
+; CHECK-NEXT: rlwinm 7, 5, 3, 27, 27
+; CHECK-NEXT: xori 7, 7, 16
+; CHECK-NEXT: ori 8, 8, 65535
+; CHECK-NEXT: clrlwi 6, 4, 16
+; CHECK-NEXT: rldicr 5, 5, 0, 61
+; CHECK-NEXT: slw 8, 8, 7
+; CHECK-NEXT: b .LBB1_2
+; CHECK-NEXT: .LBB1_1: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: srw 3, 11, 7
+; CHECK-NEXT: cmplw 3, 9
+; CHECK-NEXT: beq 0, .LBB1_8
+; CHECK-NEXT: .LBB1_2: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB1_6 Depth 2
+; CHECK-NEXT: clrlwi 9, 3, 16
+; CHECK-NEXT: cmplw 9, 6
+; CHECK-NEXT: bge 0, .LBB1_4
+; CHECK-NEXT: # %bb.3: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 3, 4
+; CHECK-NEXT: b .LBB1_5
+; CHECK-NEXT: .LBB1_4:
+; CHECK-NEXT: sub 3, 3, 4
+; CHECK-NEXT: .LBB1_5: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: slw 3, 3, 7
+; CHECK-NEXT: slw 10, 9, 7
+; CHECK-NEXT: and 3, 3, 8
+; CHECK-NEXT: and 10, 10, 8
+; CHECK-NEXT: .LBB1_6: # %atomicrmw.start
+; CHECK-NEXT: # Parent Loop BB1_2 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: lwarx 12, 0, 5
+; CHECK-NEXT: and 11, 12, 8
+; CHECK-NEXT: cmpw 11, 10
+; CHECK-NEXT: bne 0, .LBB1_1
+; CHECK-NEXT: # %bb.7: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: andc 12, 12, 8
+; CHECK-NEXT: or 12, 12, 3
+; CHECK-NEXT: stwcx. 12, 0, 5
+; CHECK-NEXT: bne 0, .LBB1_6
+; CHECK-NEXT: b .LBB1_1
+; CHECK-NEXT: .LBB1_8: # %atomicrmw.end
+; CHECK-NEXT: lwsync
+; CHECK-NEXT: blr
+ %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sync
+; CHECK-NEXT: lwz 6, 0(3)
+; CHECK-NEXT: b .LBB2_2
+; CHECK-NEXT: .LBB2_1: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: cmplw 5, 6
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: beq 0, .LBB2_7
+; CHECK-NEXT: .LBB2_2: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB2_5 Depth 2
+; CHECK-NEXT: cmplw 6, 4
+; CHECK-NEXT: bge 0, .LBB2_4
+; CHECK-NEXT: # %bb.3: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 7, 4
+; CHECK-NEXT: b .LBB2_5
+; CHECK-NEXT: .LBB2_4:
+; CHECK-NEXT: sub 7, 6, 4
+; CHECK-NEXT: .LBB2_5: # %atomicrmw.start
+; CHECK-NEXT: # Parent Loop BB2_2 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: lwarx 5, 0, 3
+; CHECK-NEXT: cmpw 5, 6
+; CHECK-NEXT: bne 0, .LBB2_1
+; CHECK-NEXT: # %bb.6: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: stwcx. 7, 0, 3
+; CHECK-NEXT: bne 0, .LBB2_5
+; CHECK-NEXT: b .LBB2_1
+; CHECK-NEXT: .LBB2_7: # %atomicrmw.end
+; CHECK-NEXT: mr 3, 5
+; CHECK-NEXT: lwsync
+; CHECK-NEXT: blr
+ %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sync
+; CHECK-NEXT: ld 6, 0(3)
+; CHECK-NEXT: b .LBB3_2
+; CHECK-NEXT: .LBB3_1: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: cmpld 5, 6
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: beq 0, .LBB3_7
+; CHECK-NEXT: .LBB3_2: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB3_5 Depth 2
+; CHECK-NEXT: cmpld 6, 4
+; CHECK-NEXT: bge 0, .LBB3_4
+; CHECK-NEXT: # %bb.3: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 7, 4
+; CHECK-NEXT: b .LBB3_5
+; CHECK-NEXT: .LBB3_4:
+; CHECK-NEXT: sub 7, 6, 4
+; CHECK-NEXT: .LBB3_5: # %atomicrmw.start
+; CHECK-NEXT: # Parent Loop BB3_2 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: ldarx 5, 0, 3
+; CHECK-NEXT: cmpd 5, 6
+; CHECK-NEXT: bne 0, .LBB3_1
+; CHECK-NEXT: # %bb.6: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: stdcx. 7, 0, 3
+; CHECK-NEXT: bne 0, .LBB3_5
+; CHECK-NEXT: b .LBB3_1
+; CHECK-NEXT: .LBB3_7: # %atomicrmw.end
+; CHECK-NEXT: mr 3, 5
+; CHECK-NEXT: lwsync
+; CHECK-NEXT: blr
+ %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
+
+define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sync
+; CHECK-NEXT: mr 5, 3
+; CHECK-NEXT: rlwinm 7, 5, 3, 27, 28
+; CHECK-NEXT: lbz 3, 0(3)
+; CHECK-NEXT: xori 7, 7, 24
+; CHECK-NEXT: li 8, 255
+; CHECK-NEXT: clrlwi 6, 4, 24
+; CHECK-NEXT: rldicr 5, 5, 0, 61
+; CHECK-NEXT: slw 8, 8, 7
+; CHECK-NEXT: b .LBB4_2
+; CHECK-NEXT: .LBB4_1: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: srw 3, 11, 7
+; CHECK-NEXT: cmplw 3, 9
+; CHECK-NEXT: beq 0, .LBB4_8
+; CHECK-NEXT: .LBB4_2: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB4_6 Depth 2
+; CHECK-NEXT: clrlwi 9, 3, 24
+; CHECK-NEXT: cmplw 9, 6
+; CHECK-NEXT: bge 0, .LBB4_4
+; CHECK-NEXT: # %bb.3: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: b .LBB4_5
+; CHECK-NEXT: .LBB4_4:
+; CHECK-NEXT: sub 3, 3, 4
+; CHECK-NEXT: .LBB4_5: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: slw 3, 3, 7
+; CHECK-NEXT: slw 10, 9, 7
+; CHECK-NEXT: and 3, 3, 8
+; CHECK-NEXT: and 10, 10, 8
+; CHECK-NEXT: .LBB4_6: # %atomicrmw.start
+; CHECK-NEXT: # Parent Loop BB4_2 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: lwarx 12, 0, 5
+; CHECK-NEXT: and 11, 12, 8
+; CHECK-NEXT: cmpw 11, 10
+; CHECK-NEXT: bne 0, .LBB4_1
+; CHECK-NEXT: # %bb.7: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: andc 12, 12, 8
+; CHECK-NEXT: or 12, 12, 3
+; CHECK-NEXT: stwcx. 12, 0, 5
+; CHECK-NEXT: bne 0, .LBB4_6
+; CHECK-NEXT: b .LBB4_1
+; CHECK-NEXT: .LBB4_8: # %atomicrmw.end
+; CHECK-NEXT: lwsync
+; CHECK-NEXT: blr
+ %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sync
+; CHECK-NEXT: mr 5, 3
+; CHECK-NEXT: li 8, 0
+; CHECK-NEXT: lhz 3, 0(3)
+; CHECK-NEXT: rlwinm 7, 5, 3, 27, 27
+; CHECK-NEXT: xori 7, 7, 16
+; CHECK-NEXT: ori 8, 8, 65535
+; CHECK-NEXT: clrlwi 6, 4, 16
+; CHECK-NEXT: rldicr 5, 5, 0, 61
+; CHECK-NEXT: slw 8, 8, 7
+; CHECK-NEXT: b .LBB5_2
+; CHECK-NEXT: .LBB5_1: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: srw 3, 11, 7
+; CHECK-NEXT: cmplw 3, 9
+; CHECK-NEXT: beq 0, .LBB5_8
+; CHECK-NEXT: .LBB5_2: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB5_6 Depth 2
+; CHECK-NEXT: clrlwi 9, 3, 16
+; CHECK-NEXT: cmplw 9, 6
+; CHECK-NEXT: bge 0, .LBB5_4
+; CHECK-NEXT: # %bb.3: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: b .LBB5_5
+; CHECK-NEXT: .LBB5_4:
+; CHECK-NEXT: sub 3, 3, 4
+; CHECK-NEXT: .LBB5_5: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: slw 3, 3, 7
+; CHECK-NEXT: slw 10, 9, 7
+; CHECK-NEXT: and 3, 3, 8
+; CHECK-NEXT: and 10, 10, 8
+; CHECK-NEXT: .LBB5_6: # %atomicrmw.start
+; CHECK-NEXT: # Parent Loop BB5_2 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: lwarx 12, 0, 5
+; CHECK-NEXT: and 11, 12, 8
+; CHECK-NEXT: cmpw 11, 10
+; CHECK-NEXT: bne 0, .LBB5_1
+; CHECK-NEXT: # %bb.7: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: andc 12, 12, 8
+; CHECK-NEXT: or 12, 12, 3
+; CHECK-NEXT: stwcx. 12, 0, 5
+; CHECK-NEXT: bne 0, .LBB5_6
+; CHECK-NEXT: b .LBB5_1
+; CHECK-NEXT: .LBB5_8: # %atomicrmw.end
+; CHECK-NEXT: lwsync
+; CHECK-NEXT: blr
+ %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sync
+; CHECK-NEXT: lwz 6, 0(3)
+; CHECK-NEXT: b .LBB6_2
+; CHECK-NEXT: .LBB6_1: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: cmplw 5, 6
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: beq 0, .LBB6_7
+; CHECK-NEXT: .LBB6_2: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB6_5 Depth 2
+; CHECK-NEXT: cmplw 6, 4
+; CHECK-NEXT: bge 0, .LBB6_4
+; CHECK-NEXT: # %bb.3: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: li 7, 0
+; CHECK-NEXT: b .LBB6_5
+; CHECK-NEXT: .LBB6_4:
+; CHECK-NEXT: sub 7, 6, 4
+; CHECK-NEXT: .LBB6_5: # %atomicrmw.start
+; CHECK-NEXT: # Parent Loop BB6_2 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: lwarx 5, 0, 3
+; CHECK-NEXT: cmpw 5, 6
+; CHECK-NEXT: bne 0, .LBB6_1
+; CHECK-NEXT: # %bb.6: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: stwcx. 7, 0, 3
+; CHECK-NEXT: bne 0, .LBB6_5
+; CHECK-NEXT: b .LBB6_1
+; CHECK-NEXT: .LBB6_7: # %atomicrmw.end
+; CHECK-NEXT: mr 3, 5
+; CHECK-NEXT: lwsync
+; CHECK-NEXT: blr
+ %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sync
+; CHECK-NEXT: ld 6, 0(3)
+; CHECK-NEXT: b .LBB7_2
+; CHECK-NEXT: .LBB7_1: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: cmpld 5, 6
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: beq 0, .LBB7_7
+; CHECK-NEXT: .LBB7_2: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB7_5 Depth 2
+; CHECK-NEXT: cmpld 6, 4
+; CHECK-NEXT: bge 0, .LBB7_4
+; CHECK-NEXT: # %bb.3: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: li 7, 0
+; CHECK-NEXT: b .LBB7_5
+; CHECK-NEXT: .LBB7_4:
+; CHECK-NEXT: sub 7, 6, 4
+; CHECK-NEXT: .LBB7_5: # %atomicrmw.start
+; CHECK-NEXT: # Parent Loop BB7_2 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: ldarx 5, 0, 3
+; CHECK-NEXT: cmpd 5, 6
+; CHECK-NEXT: bne 0, .LBB7_1
+; CHECK-NEXT: # %bb.6: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: stdcx. 7, 0, 3
+; CHECK-NEXT: bne 0, .LBB7_5
+; CHECK-NEXT: b .LBB7_1
+; CHECK-NEXT: .LBB7_7: # %atomicrmw.end
+; CHECK-NEXT: mr 3, 5
+; CHECK-NEXT: lwsync
+; CHECK-NEXT: blr
+ %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll
new file mode 100644
index 0000000000000..45f8120e0d10b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll
@@ -0,0 +1,1412 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32IA %s
+; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-ztso -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32IA %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64IA %s
+; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64IA %s
+
+
+define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
+; RV32I-LABEL: atomicrmw_cond_sub_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: .cfi_def_cfa_offset 32
+; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: .cfi_offset s2, -16
+; RV32I-NEXT: mv s0, a0
+; RV32I-NEXT: lbu a3, 0(a0)
+; RV32I-NEXT: mv s1, a1
+; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: j .LBB0_3
+; RV32I-NEXT: .LBB0_1: # %atomicrmw.start
+; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1
+; RV32I-NEXT: mv a2, s1
+; RV32I-NEXT: .LBB0_2: # %atomicrmw.start
+; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1
+; RV32I-NEXT: sb a3, 15(sp)
+; RV32I-NEXT: addi a1, sp, 15
+; RV32I-NEXT: li a3, 5
+; RV32I-NEXT: li a4, 5
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: call __atomic_compare_exchange_1
+; RV32I-NEXT: lbu a3, 15(sp)
+; RV32I-NEXT: bnez a0, .LBB0_5
+; RV32I-NEXT: .LBB0_3: # %atomicrmw.start
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: bltu a0, s2, .LBB0_1
+; RV32I-NEXT: # %bb.4: # in Loop: Header=BB0_3 Depth=1
+; RV32I-NEXT: sub a2, a3, s1
+; RV32I-NEXT: j .LBB0_2
+; RV32I-NEXT: .LBB0_5: # %atomicrmw.end
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomicrmw_cond_sub_i8:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: andi a2, a0, -4
+; RV32IA-NEXT: slli a3, a0, 3
+; RV32IA-NEXT: andi a0, a3, 24
+; RV32IA-NEXT: li a4, 255
+; RV32IA-NEXT: lw a6, 0(a2)
+; RV32IA-NEXT: sll a3, a4, a3
+; RV32IA-NEXT: not a3, a3
+; RV32IA-NEXT: andi a4, a1, 255
+; RV32IA-NEXT: j .LBB0_3
+; RV32IA-NEXT: .LBB0_1: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB0_3 Depth=1
+; RV32IA-NEXT: mv a6, a1
+; RV32IA-NEXT: .LBB0_2: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB0_3 Depth=1
+; RV32IA-NEXT: andi a6, a6, 255
+; RV32IA-NEXT: sll a6, a6, a0
+; RV32IA-NEXT: and a7, a5, a3
+; RV32IA-NEXT: or a7, a7, a6
+; RV32IA-NEXT: .LBB0_6: # %atomicrmw.start
+; RV32IA-NEXT: # Parent Loop BB0_3 Depth=1
+; RV32IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV32IA-NEXT: lr.w.aqrl a6, (a2)
+; RV32IA-NEXT: bne a6, a5, .LBB0_8
+; RV32IA-NEXT: # %bb.7: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB0_6 Depth=2
+; RV32IA-NEXT: sc.w.rl t0, a7, (a2)
+; RV32IA-NEXT: bnez t0, .LBB0_6
+; RV32IA-NEXT: .LBB0_8: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB0_3 Depth=1
+; RV32IA-NEXT: beq a6, a5, .LBB0_5
+; RV32IA-NEXT: .LBB0_3: # %atomicrmw.start
+; RV32IA-NEXT: # =>This Loop Header: Depth=1
+; RV32IA-NEXT: # Child Loop BB0_6 Depth 2
+; RV32IA-NEXT: mv a5, a6
+; RV32IA-NEXT: srl a6, a6, a0
+; RV32IA-NEXT: andi a7, a6, 255
+; RV32IA-NEXT: bltu a7, a4, .LBB0_1
+; RV32IA-NEXT: # %bb.4: # in Loop: Header=BB0_3 Depth=1
+; RV32IA-NEXT: sub a6, a6, a1
+; RV32IA-NEXT: j .LBB0_2
+; RV32IA-NEXT: .LBB0_5: # %atomicrmw.end
+; RV32IA-NEXT: srl a0, a6, a0
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomicrmw_cond_sub_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -48
+; RV64I-NEXT: .cfi_def_cfa_offset 48
+; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: .cfi_offset s2, -32
+; RV64I-NEXT: mv s0, a0
+; RV64I-NEXT: lbu a3, 0(a0)
+; RV64I-NEXT: mv s1, a1
+; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: j .LBB0_3
+; RV64I-NEXT: .LBB0_1: # %atomicrmw.start
+; RV64I-NEXT: # in Loop: Header=BB0_3 Depth=1
+; RV64I-NEXT: mv a2, s1
+; RV64I-NEXT: .LBB0_2: # %atomicrmw.start
+; RV64I-NEXT: # in Loop: Header=BB0_3 Depth=1
+; RV64I-NEXT: sb a3, 15(sp)
+; RV64I-NEXT: addi a1, sp, 15
+; RV64I-NEXT: li a3, 5
+; RV64I-NEXT: li a4, 5
+; RV64I-NEXT: mv a0, s0
+; RV64I-NEXT: call __atomic_compare_exchange_1
+; RV64I-NEXT: lbu a3, 15(sp)
+; RV64I-NEXT: bnez a0, .LBB0_5
+; RV64I-NEXT: .LBB0_3: # %atomicrmw.start
+; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: bltu a0, s2, .LBB0_1
+; RV64I-NEXT: # %bb.4: # in Loop: Header=BB0_3 Depth=1
+; RV64I-NEXT: sub a2, a3, s1
+; RV64I-NEXT: j .LBB0_2
+; RV64I-NEXT: .LBB0_5: # %atomicrmw.end
+; RV64I-NEXT: mv a0, a3
+; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 48
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_cond_sub_i8:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, -4
+; RV64IA-NEXT: slli a4, a0, 3
+; RV64IA-NEXT: andi a0, a4, 24
+; RV64IA-NEXT: li a5, 255
+; RV64IA-NEXT: lw a3, 0(a2)
+; RV64IA-NEXT: sllw a4, a5, a4
+; RV64IA-NEXT: not a4, a4
+; RV64IA-NEXT: andi a5, a1, 255
+; RV64IA-NEXT: j .LBB0_3
+; RV64IA-NEXT: .LBB0_1: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB0_3 Depth=1
+; RV64IA-NEXT: mv a6, a1
+; RV64IA-NEXT: .LBB0_2: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB0_3 Depth=1
+; RV64IA-NEXT: sext.w a7, a3
+; RV64IA-NEXT: andi a6, a6, 255
+; RV64IA-NEXT: sllw a6, a6, a0
+; RV64IA-NEXT: and a3, a3, a4
+; RV64IA-NEXT: or a6, a3, a6
+; RV64IA-NEXT: .LBB0_6: # %atomicrmw.start
+; RV64IA-NEXT: # Parent Loop BB0_3 Depth=1
+; RV64IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV64IA-NEXT: lr.w.aqrl a3, (a2)
+; RV64IA-NEXT: bne a3, a7, .LBB0_8
+; RV64IA-NEXT: # %bb.7: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB0_6 Depth=2
+; RV64IA-NEXT: sc.w.rl t0, a6, (a2)
+; RV64IA-NEXT: bnez t0, .LBB0_6
+; RV64IA-NEXT: .LBB0_8: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB0_3 Depth=1
+; RV64IA-NEXT: beq a3, a7, .LBB0_5
+; RV64IA-NEXT: .LBB0_3: # %atomicrmw.start
+; RV64IA-NEXT: # =>This Loop Header: Depth=1
+; RV64IA-NEXT: # Child Loop BB0_6 Depth 2
+; RV64IA-NEXT: srlw a6, a3, a0
+; RV64IA-NEXT: andi a7, a6, 255
+; RV64IA-NEXT: bltu a7, a5, .LBB0_1
+; RV64IA-NEXT: # %bb.4: # in Loop: Header=BB0_3 Depth=1
+; RV64IA-NEXT: sub a6, a6, a1
+; RV64IA-NEXT: j .LBB0_2
+; RV64IA-NEXT: .LBB0_5: # %atomicrmw.end
+; RV64IA-NEXT: srlw a0, a3, a0
+; RV64IA-NEXT: ret
+ %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
+; RV32I-LABEL: atomicrmw_cond_sub_i16:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: .cfi_def_cfa_offset 32
+; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: .cfi_offset s2, -16
+; RV32I-NEXT: .cfi_offset s3, -20
+; RV32I-NEXT: mv s0, a1
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: lhu a1, 0(a0)
+; RV32I-NEXT: lui s2, 16
+; RV32I-NEXT: addi s2, s2, -1
+; RV32I-NEXT: and s3, s0, s2
+; RV32I-NEXT: j .LBB1_3
+; RV32I-NEXT: .LBB1_1: # %atomicrmw.start
+; RV32I-NEXT: # in Loop: Header=BB1_3 Depth=1
+; RV32I-NEXT: mv a2, s0
+; RV32I-NEXT: .LBB1_2: # %atomicrmw.start
+; RV32I-NEXT: # in Loop: Header=BB1_3 Depth=1
+; RV32I-NEXT: sh a1, 10(sp)
+; RV32I-NEXT: addi a1, sp, 10
+; RV32I-NEXT: li a3, 5
+; RV32I-NEXT: li a4, 5
+; RV32I-NEXT: mv a0, s1
+; RV32I-NEXT: call __atomic_compare_exchange_2
+; RV32I-NEXT: lh a1, 10(sp)
+; RV32I-NEXT: bnez a0, .LBB1_5
+; RV32I-NEXT: .LBB1_3: # %atomicrmw.start
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: and a0, a1, s2
+; RV32I-NEXT: bltu a0, s3, .LBB1_1
+; RV32I-NEXT: # %bb.4: # in Loop: Header=BB1_3 Depth=1
+; RV32I-NEXT: sub a2, a1, s0
+; RV32I-NEXT: j .LBB1_2
+; RV32I-NEXT: .LBB1_5: # %atomicrmw.end
+; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomicrmw_cond_sub_i16:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: andi a2, a0, -4
+; RV32IA-NEXT: slli a4, a0, 3
+; RV32IA-NEXT: andi a0, a4, 24
+; RV32IA-NEXT: lui a3, 16
+; RV32IA-NEXT: addi a3, a3, -1
+; RV32IA-NEXT: lw a7, 0(a2)
+; RV32IA-NEXT: sll a4, a3, a4
+; RV32IA-NEXT: not a4, a4
+; RV32IA-NEXT: and a5, a1, a3
+; RV32IA-NEXT: j .LBB1_3
+; RV32IA-NEXT: .LBB1_1: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB1_3 Depth=1
+; RV32IA-NEXT: mv a7, a1
+; RV32IA-NEXT: .LBB1_2: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB1_3 Depth=1
+; RV32IA-NEXT: and a7, a7, a3
+; RV32IA-NEXT: sll a7, a7, a0
+; RV32IA-NEXT: and t0, a6, a4
+; RV32IA-NEXT: or t0, t0, a7
+; RV32IA-NEXT: .LBB1_6: # %atomicrmw.start
+; RV32IA-NEXT: # Parent Loop BB1_3 Depth=1
+; RV32IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV32IA-NEXT: lr.w.aqrl a7, (a2)
+; RV32IA-NEXT: bne a7, a6, .LBB1_8
+; RV32IA-NEXT: # %bb.7: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB1_6 Depth=2
+; RV32IA-NEXT: sc.w.rl t1, t0, (a2)
+; RV32IA-NEXT: bnez t1, .LBB1_6
+; RV32IA-NEXT: .LBB1_8: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB1_3 Depth=1
+; RV32IA-NEXT: beq a7, a6, .LBB1_5
+; RV32IA-NEXT: .LBB1_3: # %atomicrmw.start
+; RV32IA-NEXT: # =>This Loop Header: Depth=1
+; RV32IA-NEXT: # Child Loop BB1_6 Depth 2
+; RV32IA-NEXT: mv a6, a7
+; RV32IA-NEXT: srl a7, a7, a0
+; RV32IA-NEXT: and t0, a7, a3
+; RV32IA-NEXT: bltu t0, a5, .LBB1_1
+; RV32IA-NEXT: # %bb.4: # in Loop: Header=BB1_3 Depth=1
+; RV32IA-NEXT: sub a7, a7, a1
+; RV32IA-NEXT: j .LBB1_2
+; RV32IA-NEXT: .LBB1_5: # %atomicrmw.end
+; RV32IA-NEXT: srl a0, a7, a0
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomicrmw_cond_sub_i16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -48
+; RV64I-NEXT: .cfi_def_cfa_offset 48
+; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: .cfi_offset s2, -32
+; RV64I-NEXT: .cfi_offset s3, -40
+; RV64I-NEXT: mv s0, a1
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: lhu a1, 0(a0)
+; RV64I-NEXT: lui s2, 16
+; RV64I-NEXT: addiw s2, s2, -1
+; RV64I-NEXT: and s3, s0, s2
+; RV64I-NEXT: j .LBB1_3
+; RV64I-NEXT: .LBB1_1: # %atomicrmw.start
+; RV64I-NEXT: # in Loop: Header=BB1_3 Depth=1
+; RV64I-NEXT: mv a2, s0
+; RV64I-NEXT: .LBB1_2: # %atomicrmw.start
+; RV64I-NEXT: # in Loop: Header=BB1_3 Depth=1
+; RV64I-NEXT: sh a1, 6(sp)
+; RV64I-NEXT: addi a1, sp, 6
+; RV64I-NEXT: li a3, 5
+; RV64I-NEXT: li a4, 5
+; RV64I-NEXT: mv a0, s1
+; RV64I-NEXT: call __atomic_compare_exchange_2
+; RV64I-NEXT: lh a1, 6(sp)
+; RV64I-NEXT: bnez a0, .LBB1_5
+; RV64I-NEXT: .LBB1_3: # %atomicrmw.start
+; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT: and a0, a1, s2
+; RV64I-NEXT: bltu a0, s3, .LBB1_1
+; RV64I-NEXT: # %bb.4: # in Loop: Header=BB1_3 Depth=1
+; RV64I-NEXT: sub a2, a1, s0
+; RV64I-NEXT: j .LBB1_2
+; RV64I-NEXT: .LBB1_5: # %atomicrmw.end
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 48
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_cond_sub_i16:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, -4
+; RV64IA-NEXT: slli a5, a0, 3
+; RV64IA-NEXT: andi a0, a5, 24
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: lw a4, 0(a2)
+; RV64IA-NEXT: sllw a5, a3, a5
+; RV64IA-NEXT: not a5, a5
+; RV64IA-NEXT: and a6, a1, a3
+; RV64IA-NEXT: j .LBB1_3
+; RV64IA-NEXT: .LBB1_1: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB1_3 Depth=1
+; RV64IA-NEXT: mv a7, a1
+; RV64IA-NEXT: .LBB1_2: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB1_3 Depth=1
+; RV64IA-NEXT: sext.w t0, a4
+; RV64IA-NEXT: and a7, a7, a3
+; RV64IA-NEXT: sllw a7, a7, a0
+; RV64IA-NEXT: and a4, a4, a5
+; RV64IA-NEXT: or a7, a4, a7
+; RV64IA-NEXT: .LBB1_6: # %atomicrmw.start
+; RV64IA-NEXT: # Parent Loop BB1_3 Depth=1
+; RV64IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV64IA-NEXT: lr.w.aqrl a4, (a2)
+; RV64IA-NEXT: bne a4, t0, .LBB1_8
+; RV64IA-NEXT: # %bb.7: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB1_6 Depth=2
+; RV64IA-NEXT: sc.w.rl t1, a7, (a2)
+; RV64IA-NEXT: bnez t1, .LBB1_6
+; RV64IA-NEXT: .LBB1_8: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB1_3 Depth=1
+; RV64IA-NEXT: beq a4, t0, .LBB1_5
+; RV64IA-NEXT: .LBB1_3: # %atomicrmw.start
+; RV64IA-NEXT: # =>This Loop Header: Depth=1
+; RV64IA-NEXT: # Child Loop BB1_6 Depth 2
+; RV64IA-NEXT: srlw a7, a4, a0
+; RV64IA-NEXT: and t0, a7, a3
+; RV64IA-NEXT: bltu t0, a6, .LBB1_1
+; RV64IA-NEXT: # %bb.4: # in Loop: Header=BB1_3 Depth=1
+; RV64IA-NEXT: sub a7, a7, a1
+; RV64IA-NEXT: j .LBB1_2
+; RV64IA-NEXT: .LBB1_5: # %atomicrmw.end
+; RV64IA-NEXT: srlw a0, a4, a0
+; RV64IA-NEXT: ret
+ %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
+; RV32I-LABEL: atomicrmw_cond_sub_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: mv s0, a0
+; RV32I-NEXT: lw a3, 0(a0)
+; RV32I-NEXT: mv s1, a1
+; RV32I-NEXT: j .LBB2_3
+; RV32I-NEXT: .LBB2_1: # %atomicrmw.start
+; RV32I-NEXT: # in Loop: Header=BB2_3 Depth=1
+; RV32I-NEXT: mv a2, s1
+; RV32I-NEXT: .LBB2_2: # %atomicrmw.start
+; RV32I-NEXT: # in Loop: Header=BB2_3 Depth=1
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: mv a1, sp
+; RV32I-NEXT: li a3, 5
+; RV32I-NEXT: li a4, 5
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: call __atomic_compare_exchange_4
+; RV32I-NEXT: lw a3, 0(sp)
+; RV32I-NEXT: bnez a0, .LBB2_5
+; RV32I-NEXT: .LBB2_3: # %atomicrmw.start
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: bltu a3, s1, .LBB2_1
+; RV32I-NEXT: # %bb.4: # in Loop: Header=BB2_3 Depth=1
+; RV32I-NEXT: sub a2, a3, s1
+; RV32I-NEXT: j .LBB2_2
+; RV32I-NEXT: .LBB2_5: # %atomicrmw.end
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomicrmw_cond_sub_i32:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: lw a2, 0(a0)
+; RV32IA-NEXT: j .LBB2_2
+; RV32IA-NEXT: .LBB2_1: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB2_2 Depth=1
+; RV32IA-NEXT: mv a4, a1
+; RV32IA-NEXT: .LBB2_5: # %atomicrmw.start
+; RV32IA-NEXT: # Parent Loop BB2_2 Depth=1
+; RV32IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV32IA-NEXT: lr.w.aqrl a2, (a0)
+; RV32IA-NEXT: bne a2, a3, .LBB2_7
+; RV32IA-NEXT: # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB2_5 Depth=2
+; RV32IA-NEXT: sc.w.rl a5, a1, (a0)
+; RV32IA-NEXT: bnez a5, .LBB2_5
+; RV32IA-NEXT: .LBB2_7: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB2_2 Depth=1
+; RV32IA-NEXT: beq a2, a3, .LBB2_4
+; RV32IA-NEXT: .LBB2_2: # %atomicrmw.start
+; RV32IA-NEXT: # =>This Loop Header: Depth=1
+; RV32IA-NEXT: # Child Loop BB2_8 Depth 2
+; RV32IA-NEXT: # Child Loop BB2_5 Depth 2
+; RV32IA-NEXT: mv a3, a2
+; RV32IA-NEXT: bltu a2, a1, .LBB2_1
+; RV32IA-NEXT: # %bb.3: # in Loop: Header=BB2_2 Depth=1
+; RV32IA-NEXT: sub a4, a3, a1
+; RV32IA-NEXT: .LBB2_8: # Parent Loop BB2_2 Depth=1
+; RV32IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV32IA-NEXT: lr.w.aqrl a2, (a0)
+; RV32IA-NEXT: bne a2, a3, .LBB2_2
+; RV32IA-NEXT: # %bb.9: # in Loop: Header=BB2_8 Depth=2
+; RV32IA-NEXT: sc.w.rl a5, a4, (a0)
+; RV32IA-NEXT: bnez a5, .LBB2_8
+; RV32IA-NEXT: # %bb.10:
+; RV32IA-NEXT: .LBB2_4: # %atomicrmw.end
+; RV32IA-NEXT: mv a0, a2
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomicrmw_cond_sub_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -48
+; RV64I-NEXT: .cfi_def_cfa_offset 48
+; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: .cfi_offset s2, -32
+; RV64I-NEXT: mv s0, a0
+; RV64I-NEXT: lw a3, 0(a0)
+; RV64I-NEXT: mv s1, a1
+; RV64I-NEXT: sext.w s2, a1
+; RV64I-NEXT: j .LBB2_3
+; RV64I-NEXT: .LBB2_1: # %atomicrmw.start
+; RV64I-NEXT: # in Loop: Header=BB2_3 Depth=1
+; RV64I-NEXT: mv a2, s1
+; RV64I-NEXT: .LBB2_2: # %atomicrmw.start
+; RV64I-NEXT: # in Loop: Header=BB2_3 Depth=1
+; RV64I-NEXT: sw a3, 12(sp)
+; RV64I-NEXT: addi a1, sp, 12
+; RV64I-NEXT: li a3, 5
+; RV64I-NEXT: li a4, 5
+; RV64I-NEXT: mv a0, s0
+; RV64I-NEXT: call __atomic_compare_exchange_4
+; RV64I-NEXT: lw a3, 12(sp)
+; RV64I-NEXT: bnez a0, .LBB2_5
+; RV64I-NEXT: .LBB2_3: # %atomicrmw.start
+; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT: bltu a3, s2, .LBB2_1
+; RV64I-NEXT: # %bb.4: # in Loop: Header=BB2_3 Depth=1
+; RV64I-NEXT: subw a2, a3, s1
+; RV64I-NEXT: j .LBB2_2
+; RV64I-NEXT: .LBB2_5: # %atomicrmw.end
+; RV64I-NEXT: mv a0, a3
+; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 48
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_cond_sub_i32:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lw a2, 0(a0)
+; RV64IA-NEXT: sext.w a3, a1
+; RV64IA-NEXT: j .LBB2_2
+; RV64IA-NEXT: .LBB2_1: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB2_2 Depth=1
+; RV64IA-NEXT: mv a5, a1
+; RV64IA-NEXT: .LBB2_5: # %atomicrmw.start
+; RV64IA-NEXT: # Parent Loop BB2_2 Depth=1
+; RV64IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV64IA-NEXT: lr.w.aqrl a2, (a0)
+; RV64IA-NEXT: bne a2, a4, .LBB2_7
+; RV64IA-NEXT: # %bb.6: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB2_5 Depth=2
+; RV64IA-NEXT: sc.w.rl a6, a1, (a0)
+; RV64IA-NEXT: bnez a6, .LBB2_5
+; RV64IA-NEXT: .LBB2_7: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB2_2 Depth=1
+; RV64IA-NEXT: beq a2, a4, .LBB2_4
+; RV64IA-NEXT: .LBB2_2: # %atomicrmw.start
+; RV64IA-NEXT: # =>This Loop Header: Depth=1
+; RV64IA-NEXT: # Child Loop BB2_8 Depth 2
+; RV64IA-NEXT: # Child Loop BB2_5 Depth 2
+; RV64IA-NEXT: sext.w a4, a2
+; RV64IA-NEXT: bltu a4, a3, .LBB2_1
+; RV64IA-NEXT: # %bb.3: # in Loop: Header=BB2_2 Depth=1
+; RV64IA-NEXT: subw a5, a2, a1
+; RV64IA-NEXT: .LBB2_8: # Parent Loop BB2_2 Depth=1
+; RV64IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV64IA-NEXT: lr.w.aqrl a2, (a0)
+; RV64IA-NEXT: bne a2, a4, .LBB2_2
+; RV64IA-NEXT: # %bb.9: # in Loop: Header=BB2_8 Depth=2
+; RV64IA-NEXT: sc.w.rl a6, a5, (a0)
+; RV64IA-NEXT: bnez a6, .LBB2_8
+; RV64IA-NEXT: # %bb.10:
+; RV64IA-NEXT: .LBB2_4: # %atomicrmw.end
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
+ %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
+; RV32I-LABEL: atomicrmw_cond_sub_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: .cfi_def_cfa_offset 32
+; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: .cfi_offset s2, -16
+; RV32I-NEXT: mv s0, a0
+; RV32I-NEXT: lw a5, 4(a0)
+; RV32I-NEXT: lw a4, 0(a0)
+; RV32I-NEXT: mv s1, a2
+; RV32I-NEXT: mv s2, a1
+; RV32I-NEXT: j .LBB3_3
+; RV32I-NEXT: .LBB3_1: # %atomicrmw.start
+; RV32I-NEXT: # in Loop: Header=BB3_3 Depth=1
+; RV32I-NEXT: mv a3, s1
+; RV32I-NEXT: mv a2, s2
+; RV32I-NEXT: .LBB3_2: # %atomicrmw.start
+; RV32I-NEXT: # in Loop: Header=BB3_3 Depth=1
+; RV32I-NEXT: sw a4, 8(sp)
+; RV32I-NEXT: sw a5, 12(sp)
+; RV32I-NEXT: addi a1, sp, 8
+; RV32I-NEXT: li a4, 5
+; RV32I-NEXT: li a5, 5
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: call __atomic_compare_exchange_8
+; RV32I-NEXT: lw a5, 12(sp)
+; RV32I-NEXT: lw a4, 8(sp)
+; RV32I-NEXT: bnez a0, .LBB3_7
+; RV32I-NEXT: .LBB3_3: # %atomicrmw.start
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: sltu a0, a4, s2
+; RV32I-NEXT: mv a1, a0
+; RV32I-NEXT: beq a5, s1, .LBB3_5
+; RV32I-NEXT: # %bb.4: # %atomicrmw.start
+; RV32I-NEXT: # in Loop: Header=BB3_3 Depth=1
+; RV32I-NEXT: sltu a1, a5, s1
+; RV32I-NEXT: .LBB3_5: # %atomicrmw.start
+; RV32I-NEXT: # in Loop: Header=BB3_3 Depth=1
+; RV32I-NEXT: bnez a1, .LBB3_1
+; RV32I-NEXT: # %bb.6: # in Loop: Header=BB3_3 Depth=1
+; RV32I-NEXT: sub a3, a5, s1
+; RV32I-NEXT: sub a3, a3, a0
+; RV32I-NEXT: sub a2, a4, s2
+; RV32I-NEXT: j .LBB3_2
+; RV32I-NEXT: .LBB3_7: # %atomicrmw.end
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomicrmw_cond_sub_i64:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: addi sp, sp, -32
+; RV32IA-NEXT: .cfi_def_cfa_offset 32
+; RV32IA-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: .cfi_offset ra, -4
+; RV32IA-NEXT: .cfi_offset s0, -8
+; RV32IA-NEXT: .cfi_offset s1, -12
+; RV32IA-NEXT: .cfi_offset s2, -16
+; RV32IA-NEXT: mv s0, a0
+; RV32IA-NEXT: lw a5, 4(a0)
+; RV32IA-NEXT: lw a4, 0(a0)
+; RV32IA-NEXT: mv s1, a2
+; RV32IA-NEXT: mv s2, a1
+; RV32IA-NEXT: j .LBB3_3
+; RV32IA-NEXT: .LBB3_1: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB3_3 Depth=1
+; RV32IA-NEXT: mv a3, s1
+; RV32IA-NEXT: mv a2, s2
+; RV32IA-NEXT: .LBB3_2: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB3_3 Depth=1
+; RV32IA-NEXT: sw a4, 8(sp)
+; RV32IA-NEXT: sw a5, 12(sp)
+; RV32IA-NEXT: addi a1, sp, 8
+; RV32IA-NEXT: li a4, 5
+; RV32IA-NEXT: li a5, 5
+; RV32IA-NEXT: mv a0, s0
+; RV32IA-NEXT: call __atomic_compare_exchange_8
+; RV32IA-NEXT: lw a5, 12(sp)
+; RV32IA-NEXT: lw a4, 8(sp)
+; RV32IA-NEXT: bnez a0, .LBB3_7
+; RV32IA-NEXT: .LBB3_3: # %atomicrmw.start
+; RV32IA-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT: sltu a0, a4, s2
+; RV32IA-NEXT: mv a1, a0
+; RV32IA-NEXT: beq a5, s1, .LBB3_5
+; RV32IA-NEXT: # %bb.4: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB3_3 Depth=1
+; RV32IA-NEXT: sltu a1, a5, s1
+; RV32IA-NEXT: .LBB3_5: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB3_3 Depth=1
+; RV32IA-NEXT: bnez a1, .LBB3_1
+; RV32IA-NEXT: # %bb.6: # in Loop: Header=BB3_3 Depth=1
+; RV32IA-NEXT: sub a3, a5, s1
+; RV32IA-NEXT: sub a3, a3, a0
+; RV32IA-NEXT: sub a2, a4, s2
+; RV32IA-NEXT: j .LBB3_2
+; RV32IA-NEXT: .LBB3_7: # %atomicrmw.end
+; RV32IA-NEXT: mv a0, a4
+; RV32IA-NEXT: mv a1, a5
+; RV32IA-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: addi sp, sp, 32
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomicrmw_cond_sub_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -32
+; RV64I-NEXT: .cfi_def_cfa_offset 32
+; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: mv s0, a0
+; RV64I-NEXT: ld a3, 0(a0)
+; RV64I-NEXT: mv s1, a1
+; RV64I-NEXT: j .LBB3_3
+; RV64I-NEXT: .LBB3_1: # %atomicrmw.start
+; RV64I-NEXT: # in Loop: Header=BB3_3 Depth=1
+; RV64I-NEXT: mv a2, s1
+; RV64I-NEXT: .LBB3_2: # %atomicrmw.start
+; RV64I-NEXT: # in Loop: Header=BB3_3 Depth=1
+; RV64I-NEXT: sd a3, 0(sp)
+; RV64I-NEXT: mv a1, sp
+; RV64I-NEXT: li a3, 5
+; RV64I-NEXT: li a4, 5
+; RV64I-NEXT: mv a0, s0
+; RV64I-NEXT: call __atomic_compare_exchange_8
+; RV64I-NEXT: ld a3, 0(sp)
+; RV64I-NEXT: bnez a0, .LBB3_5
+; RV64I-NEXT: .LBB3_3: # %atomicrmw.start
+; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT: bltu a3, s1, .LBB3_1
+; RV64I-NEXT: # %bb.4: # in Loop: Header=BB3_3 Depth=1
+; RV64I-NEXT: sub a2, a3, s1
+; RV64I-NEXT: j .LBB3_2
+; RV64I-NEXT: .LBB3_5: # %atomicrmw.end
+; RV64I-NEXT: mv a0, a3
+; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 32
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_cond_sub_i64:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: ld a2, 0(a0)
+; RV64IA-NEXT: j .LBB3_2
+; RV64IA-NEXT: .LBB3_1: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB3_2 Depth=1
+; RV64IA-NEXT: mv a4, a1
+; RV64IA-NEXT: .LBB3_5: # %atomicrmw.start
+; RV64IA-NEXT: # Parent Loop BB3_2 Depth=1
+; RV64IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV64IA-NEXT: lr.d.aqrl a2, (a0)
+; RV64IA-NEXT: bne a2, a3, .LBB3_7
+; RV64IA-NEXT: # %bb.6: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB3_5 Depth=2
+; RV64IA-NEXT: sc.d.rl a5, a1, (a0)
+; RV64IA-NEXT: bnez a5, .LBB3_5
+; RV64IA-NEXT: .LBB3_7: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB3_2 Depth=1
+; RV64IA-NEXT: beq a2, a3, .LBB3_4
+; RV64IA-NEXT: .LBB3_2: # %atomicrmw.start
+; RV64IA-NEXT: # =>This Loop Header: Depth=1
+; RV64IA-NEXT: # Child Loop BB3_8 Depth 2
+; RV64IA-NEXT: # Child Loop BB3_5 Depth 2
+; RV64IA-NEXT: mv a3, a2
+; RV64IA-NEXT: bltu a2, a1, .LBB3_1
+; RV64IA-NEXT: # %bb.3: # in Loop: Header=BB3_2 Depth=1
+; RV64IA-NEXT: sub a4, a3, a1
+; RV64IA-NEXT: .LBB3_8: # Parent Loop BB3_2 Depth=1
+; RV64IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV64IA-NEXT: lr.d.aqrl a2, (a0)
+; RV64IA-NEXT: bne a2, a3, .LBB3_2
+; RV64IA-NEXT: # %bb.9: # in Loop: Header=BB3_8 Depth=2
+; RV64IA-NEXT: sc.d.rl a5, a4, (a0)
+; RV64IA-NEXT: bnez a5, .LBB3_8
+; RV64IA-NEXT: # %bb.10:
+; RV64IA-NEXT: .LBB3_4: # %atomicrmw.end
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
+ %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
+
+define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
+; RV32I-LABEL: atomicrmw_sub_clamp_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: .cfi_def_cfa_offset 32
+; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: .cfi_offset s2, -16
+; RV32I-NEXT: mv s0, a0
+; RV32I-NEXT: lbu a3, 0(a0)
+; RV32I-NEXT: mv s1, a1
+; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: .LBB4_1: # %atomicrmw.start
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: sltu a0, a0, s2
+; RV32I-NEXT: sub a1, a3, s1
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: sb a3, 15(sp)
+; RV32I-NEXT: addi a1, sp, 15
+; RV32I-NEXT: li a3, 5
+; RV32I-NEXT: li a4, 5
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: call __atomic_compare_exchange_1
+; RV32I-NEXT: lbu a3, 15(sp)
+; RV32I-NEXT: beqz a0, .LBB4_1
+; RV32I-NEXT: # %bb.2: # %atomicrmw.end
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomicrmw_sub_clamp_i8:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: andi a2, a0, -4
+; RV32IA-NEXT: slli a3, a0, 3
+; RV32IA-NEXT: andi a0, a3, 24
+; RV32IA-NEXT: li a4, 255
+; RV32IA-NEXT: lw a5, 0(a2)
+; RV32IA-NEXT: sll a3, a4, a3
+; RV32IA-NEXT: not a3, a3
+; RV32IA-NEXT: andi a4, a1, 255
+; RV32IA-NEXT: .LBB4_1: # %atomicrmw.start
+; RV32IA-NEXT: # =>This Loop Header: Depth=1
+; RV32IA-NEXT: # Child Loop BB4_3 Depth 2
+; RV32IA-NEXT: mv a6, a5
+; RV32IA-NEXT: srl a5, a5, a0
+; RV32IA-NEXT: andi a7, a5, 255
+; RV32IA-NEXT: sltu a7, a7, a4
+; RV32IA-NEXT: sub a5, a5, a1
+; RV32IA-NEXT: addi a7, a7, -1
+; RV32IA-NEXT: and a5, a7, a5
+; RV32IA-NEXT: andi a5, a5, 255
+; RV32IA-NEXT: sll a5, a5, a0
+; RV32IA-NEXT: and a7, a6, a3
+; RV32IA-NEXT: or a7, a7, a5
+; RV32IA-NEXT: .LBB4_3: # %atomicrmw.start
+; RV32IA-NEXT: # Parent Loop BB4_1 Depth=1
+; RV32IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV32IA-NEXT: lr.w.aqrl a5, (a2)
+; RV32IA-NEXT: bne a5, a6, .LBB4_1
+; RV32IA-NEXT: # %bb.4: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB4_3 Depth=2
+; RV32IA-NEXT: sc.w.rl t0, a7, (a2)
+; RV32IA-NEXT: bnez t0, .LBB4_3
+; RV32IA-NEXT: # %bb.5: # %atomicrmw.start
+; RV32IA-NEXT: # %bb.2: # %atomicrmw.end
+; RV32IA-NEXT: srl a0, a5, a0
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomicrmw_sub_clamp_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -48
+; RV64I-NEXT: .cfi_def_cfa_offset 48
+; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: .cfi_offset s2, -32
+; RV64I-NEXT: mv s0, a0
+; RV64I-NEXT: lbu a3, 0(a0)
+; RV64I-NEXT: mv s1, a1
+; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: .LBB4_1: # %atomicrmw.start
+; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: sltu a0, a0, s2
+; RV64I-NEXT: sub a1, a3, s1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a2, a0, a1
+; RV64I-NEXT: sb a3, 15(sp)
+; RV64I-NEXT: addi a1, sp, 15
+; RV64I-NEXT: li a3, 5
+; RV64I-NEXT: li a4, 5
+; RV64I-NEXT: mv a0, s0
+; RV64I-NEXT: call __atomic_compare_exchange_1
+; RV64I-NEXT: lbu a3, 15(sp)
+; RV64I-NEXT: beqz a0, .LBB4_1
+; RV64I-NEXT: # %bb.2: # %atomicrmw.end
+; RV64I-NEXT: mv a0, a3
+; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 48
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_clamp_i8:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, -4
+; RV64IA-NEXT: slli a4, a0, 3
+; RV64IA-NEXT: andi a0, a4, 24
+; RV64IA-NEXT: li a5, 255
+; RV64IA-NEXT: lw a3, 0(a2)
+; RV64IA-NEXT: sllw a4, a5, a4
+; RV64IA-NEXT: not a4, a4
+; RV64IA-NEXT: andi a5, a1, 255
+; RV64IA-NEXT: .LBB4_1: # %atomicrmw.start
+; RV64IA-NEXT: # =>This Loop Header: Depth=1
+; RV64IA-NEXT: # Child Loop BB4_3 Depth 2
+; RV64IA-NEXT: srlw a6, a3, a0
+; RV64IA-NEXT: sext.w a7, a3
+; RV64IA-NEXT: andi t0, a6, 255
+; RV64IA-NEXT: sltu t0, t0, a5
+; RV64IA-NEXT: sub a6, a6, a1
+; RV64IA-NEXT: addi t0, t0, -1
+; RV64IA-NEXT: and a6, t0, a6
+; RV64IA-NEXT: andi a6, a6, 255
+; RV64IA-NEXT: sllw a6, a6, a0
+; RV64IA-NEXT: and a3, a3, a4
+; RV64IA-NEXT: or a6, a3, a6
+; RV64IA-NEXT: .LBB4_3: # %atomicrmw.start
+; RV64IA-NEXT: # Parent Loop BB4_1 Depth=1
+; RV64IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV64IA-NEXT: lr.w.aqrl a3, (a2)
+; RV64IA-NEXT: bne a3, a7, .LBB4_1
+; RV64IA-NEXT: # %bb.4: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB4_3 Depth=2
+; RV64IA-NEXT: sc.w.rl t0, a6, (a2)
+; RV64IA-NEXT: bnez t0, .LBB4_3
+; RV64IA-NEXT: # %bb.5: # %atomicrmw.start
+; RV64IA-NEXT: # %bb.2: # %atomicrmw.end
+; RV64IA-NEXT: srlw a0, a3, a0
+; RV64IA-NEXT: ret
+ %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
+; RV32I-LABEL: atomicrmw_sub_clamp_i16:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: .cfi_def_cfa_offset 32
+; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: .cfi_offset s2, -16
+; RV32I-NEXT: .cfi_offset s3, -20
+; RV32I-NEXT: mv s0, a1
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: lhu a1, 0(a0)
+; RV32I-NEXT: lui s2, 16
+; RV32I-NEXT: addi s2, s2, -1
+; RV32I-NEXT: and s3, s0, s2
+; RV32I-NEXT: .LBB5_1: # %atomicrmw.start
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: and a0, a1, s2
+; RV32I-NEXT: sltu a0, a0, s3
+; RV32I-NEXT: sub a2, a1, s0
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a2, a0, a2
+; RV32I-NEXT: sh a1, 10(sp)
+; RV32I-NEXT: addi a1, sp, 10
+; RV32I-NEXT: li a3, 5
+; RV32I-NEXT: li a4, 5
+; RV32I-NEXT: mv a0, s1
+; RV32I-NEXT: call __atomic_compare_exchange_2
+; RV32I-NEXT: lh a1, 10(sp)
+; RV32I-NEXT: beqz a0, .LBB5_1
+; RV32I-NEXT: # %bb.2: # %atomicrmw.end
+; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomicrmw_sub_clamp_i16:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: andi a2, a0, -4
+; RV32IA-NEXT: slli a4, a0, 3
+; RV32IA-NEXT: andi a0, a4, 24
+; RV32IA-NEXT: lui a3, 16
+; RV32IA-NEXT: addi a3, a3, -1
+; RV32IA-NEXT: lw a6, 0(a2)
+; RV32IA-NEXT: sll a4, a3, a4
+; RV32IA-NEXT: not a4, a4
+; RV32IA-NEXT: and a5, a1, a3
+; RV32IA-NEXT: .LBB5_1: # %atomicrmw.start
+; RV32IA-NEXT: # =>This Loop Header: Depth=1
+; RV32IA-NEXT: # Child Loop BB5_3 Depth 2
+; RV32IA-NEXT: mv a7, a6
+; RV32IA-NEXT: srl a6, a6, a0
+; RV32IA-NEXT: and t0, a6, a3
+; RV32IA-NEXT: sltu t0, t0, a5
+; RV32IA-NEXT: sub a6, a6, a1
+; RV32IA-NEXT: addi t0, t0, -1
+; RV32IA-NEXT: and a6, a6, a3
+; RV32IA-NEXT: and a6, t0, a6
+; RV32IA-NEXT: sll a6, a6, a0
+; RV32IA-NEXT: and t0, a7, a4
+; RV32IA-NEXT: or t0, t0, a6
+; RV32IA-NEXT: .LBB5_3: # %atomicrmw.start
+; RV32IA-NEXT: # Parent Loop BB5_1 Depth=1
+; RV32IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV32IA-NEXT: lr.w.aqrl a6, (a2)
+; RV32IA-NEXT: bne a6, a7, .LBB5_1
+; RV32IA-NEXT: # %bb.4: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB5_3 Depth=2
+; RV32IA-NEXT: sc.w.rl t1, t0, (a2)
+; RV32IA-NEXT: bnez t1, .LBB5_3
+; RV32IA-NEXT: # %bb.5: # %atomicrmw.start
+; RV32IA-NEXT: # %bb.2: # %atomicrmw.end
+; RV32IA-NEXT: srl a0, a6, a0
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomicrmw_sub_clamp_i16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -48
+; RV64I-NEXT: .cfi_def_cfa_offset 48
+; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: .cfi_offset s2, -32
+; RV64I-NEXT: .cfi_offset s3, -40
+; RV64I-NEXT: mv s0, a1
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: lhu a1, 0(a0)
+; RV64I-NEXT: lui s2, 16
+; RV64I-NEXT: addiw s2, s2, -1
+; RV64I-NEXT: and s3, s0, s2
+; RV64I-NEXT: .LBB5_1: # %atomicrmw.start
+; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT: and a0, a1, s2
+; RV64I-NEXT: sltu a0, a0, s3
+; RV64I-NEXT: sub a2, a1, s0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a2, a0, a2
+; RV64I-NEXT: sh a1, 6(sp)
+; RV64I-NEXT: addi a1, sp, 6
+; RV64I-NEXT: li a3, 5
+; RV64I-NEXT: li a4, 5
+; RV64I-NEXT: mv a0, s1
+; RV64I-NEXT: call __atomic_compare_exchange_2
+; RV64I-NEXT: lh a1, 6(sp)
+; RV64I-NEXT: beqz a0, .LBB5_1
+; RV64I-NEXT: # %bb.2: # %atomicrmw.end
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 48
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_clamp_i16:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, -4
+; RV64IA-NEXT: slli a5, a0, 3
+; RV64IA-NEXT: andi a0, a5, 24
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: lw a4, 0(a2)
+; RV64IA-NEXT: sllw a5, a3, a5
+; RV64IA-NEXT: not a5, a5
+; RV64IA-NEXT: and a6, a1, a3
+; RV64IA-NEXT: .LBB5_1: # %atomicrmw.start
+; RV64IA-NEXT: # =>This Loop Header: Depth=1
+; RV64IA-NEXT: # Child Loop BB5_3 Depth 2
+; RV64IA-NEXT: srlw a7, a4, a0
+; RV64IA-NEXT: sext.w t0, a4
+; RV64IA-NEXT: and t1, a7, a3
+; RV64IA-NEXT: sltu t1, t1, a6
+; RV64IA-NEXT: sub a7, a7, a1
+; RV64IA-NEXT: addi t1, t1, -1
+; RV64IA-NEXT: and a7, a7, a3
+; RV64IA-NEXT: and a7, t1, a7
+; RV64IA-NEXT: sllw a7, a7, a0
+; RV64IA-NEXT: and a4, a4, a5
+; RV64IA-NEXT: or a7, a4, a7
+; RV64IA-NEXT: .LBB5_3: # %atomicrmw.start
+; RV64IA-NEXT: # Parent Loop BB5_1 Depth=1
+; RV64IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV64IA-NEXT: lr.w.aqrl a4, (a2)
+; RV64IA-NEXT: bne a4, t0, .LBB5_1
+; RV64IA-NEXT: # %bb.4: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB5_3 Depth=2
+; RV64IA-NEXT: sc.w.rl t1, a7, (a2)
+; RV64IA-NEXT: bnez t1, .LBB5_3
+; RV64IA-NEXT: # %bb.5: # %atomicrmw.start
+; RV64IA-NEXT: # %bb.2: # %atomicrmw.end
+; RV64IA-NEXT: srlw a0, a4, a0
+; RV64IA-NEXT: ret
+ %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
+; RV32I-LABEL: atomicrmw_sub_clamp_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: mv s0, a0
+; RV32I-NEXT: lw a3, 0(a0)
+; RV32I-NEXT: mv s1, a1
+; RV32I-NEXT: .LBB6_1: # %atomicrmw.start
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: sltu a0, a3, s1
+; RV32I-NEXT: sub a1, a3, s1
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: mv a1, sp
+; RV32I-NEXT: li a3, 5
+; RV32I-NEXT: li a4, 5
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: call __atomic_compare_exchange_4
+; RV32I-NEXT: lw a3, 0(sp)
+; RV32I-NEXT: beqz a0, .LBB6_1
+; RV32I-NEXT: # %bb.2: # %atomicrmw.end
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomicrmw_sub_clamp_i32:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: lw a2, 0(a0)
+; RV32IA-NEXT: .LBB6_1: # %atomicrmw.start
+; RV32IA-NEXT: # =>This Loop Header: Depth=1
+; RV32IA-NEXT: # Child Loop BB6_3 Depth 2
+; RV32IA-NEXT: mv a3, a2
+; RV32IA-NEXT: sltu a2, a2, a1
+; RV32IA-NEXT: sub a4, a3, a1
+; RV32IA-NEXT: addi a2, a2, -1
+; RV32IA-NEXT: and a4, a2, a4
+; RV32IA-NEXT: .LBB6_3: # %atomicrmw.start
+; RV32IA-NEXT: # Parent Loop BB6_1 Depth=1
+; RV32IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV32IA-NEXT: lr.w.aqrl a2, (a0)
+; RV32IA-NEXT: bne a2, a3, .LBB6_1
+; RV32IA-NEXT: # %bb.4: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB6_3 Depth=2
+; RV32IA-NEXT: sc.w.rl a5, a4, (a0)
+; RV32IA-NEXT: bnez a5, .LBB6_3
+; RV32IA-NEXT: # %bb.5: # %atomicrmw.start
+; RV32IA-NEXT: # %bb.2: # %atomicrmw.end
+; RV32IA-NEXT: mv a0, a2
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomicrmw_sub_clamp_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -48
+; RV64I-NEXT: .cfi_def_cfa_offset 48
+; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: .cfi_offset s2, -32
+; RV64I-NEXT: mv s0, a0
+; RV64I-NEXT: lw a3, 0(a0)
+; RV64I-NEXT: mv s1, a1
+; RV64I-NEXT: sext.w s2, a1
+; RV64I-NEXT: .LBB6_1: # %atomicrmw.start
+; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT: sltu a0, a3, s2
+; RV64I-NEXT: subw a1, a3, s1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a2, a0, a1
+; RV64I-NEXT: sw a3, 12(sp)
+; RV64I-NEXT: addi a1, sp, 12
+; RV64I-NEXT: li a3, 5
+; RV64I-NEXT: li a4, 5
+; RV64I-NEXT: mv a0, s0
+; RV64I-NEXT: call __atomic_compare_exchange_4
+; RV64I-NEXT: lw a3, 12(sp)
+; RV64I-NEXT: beqz a0, .LBB6_1
+; RV64I-NEXT: # %bb.2: # %atomicrmw.end
+; RV64I-NEXT: mv a0, a3
+; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 48
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_clamp_i32:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lw a2, 0(a0)
+; RV64IA-NEXT: sext.w a3, a1
+; RV64IA-NEXT: .LBB6_1: # %atomicrmw.start
+; RV64IA-NEXT: # =>This Loop Header: Depth=1
+; RV64IA-NEXT: # Child Loop BB6_3 Depth 2
+; RV64IA-NEXT: sext.w a4, a2
+; RV64IA-NEXT: sltu a5, a4, a3
+; RV64IA-NEXT: subw a2, a2, a1
+; RV64IA-NEXT: addi a5, a5, -1
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: .LBB6_3: # %atomicrmw.start
+; RV64IA-NEXT: # Parent Loop BB6_1 Depth=1
+; RV64IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV64IA-NEXT: lr.w.aqrl a2, (a0)
+; RV64IA-NEXT: bne a2, a4, .LBB6_1
+; RV64IA-NEXT: # %bb.4: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB6_3 Depth=2
+; RV64IA-NEXT: sc.w.rl a6, a5, (a0)
+; RV64IA-NEXT: bnez a6, .LBB6_3
+; RV64IA-NEXT: # %bb.5: # %atomicrmw.start
+; RV64IA-NEXT: # %bb.2: # %atomicrmw.end
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
+ %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
+; RV32I-LABEL: atomicrmw_sub_clamp_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: .cfi_def_cfa_offset 32
+; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: .cfi_offset s2, -16
+; RV32I-NEXT: mv s0, a0
+; RV32I-NEXT: lw a5, 4(a0)
+; RV32I-NEXT: lw a4, 0(a0)
+; RV32I-NEXT: mv s1, a2
+; RV32I-NEXT: mv s2, a1
+; RV32I-NEXT: j .LBB7_2
+; RV32I-NEXT: .LBB7_1: # %atomicrmw.start
+; RV32I-NEXT: # in Loop: Header=BB7_2 Depth=1
+; RV32I-NEXT: xori a1, a1, 1
+; RV32I-NEXT: sub a2, a5, s1
+; RV32I-NEXT: sub a2, a2, a0
+; RV32I-NEXT: sub a0, a4, s2
+; RV32I-NEXT: neg a1, a1
+; RV32I-NEXT: and a3, a1, a2
+; RV32I-NEXT: and a2, a1, a0
+; RV32I-NEXT: sw a4, 8(sp)
+; RV32I-NEXT: sw a5, 12(sp)
+; RV32I-NEXT: addi a1, sp, 8
+; RV32I-NEXT: li a4, 5
+; RV32I-NEXT: li a5, 5
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: call __atomic_compare_exchange_8
+; RV32I-NEXT: lw a5, 12(sp)
+; RV32I-NEXT: lw a4, 8(sp)
+; RV32I-NEXT: bnez a0, .LBB7_4
+; RV32I-NEXT: .LBB7_2: # %atomicrmw.start
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: sltu a0, a4, s2
+; RV32I-NEXT: mv a1, a0
+; RV32I-NEXT: beq a5, s1, .LBB7_1
+; RV32I-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-NEXT: # in Loop: Header=BB7_2 Depth=1
+; RV32I-NEXT: sltu a1, a5, s1
+; RV32I-NEXT: j .LBB7_1
+; RV32I-NEXT: .LBB7_4: # %atomicrmw.end
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomicrmw_sub_clamp_i64:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: addi sp, sp, -32
+; RV32IA-NEXT: .cfi_def_cfa_offset 32
+; RV32IA-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: .cfi_offset ra, -4
+; RV32IA-NEXT: .cfi_offset s0, -8
+; RV32IA-NEXT: .cfi_offset s1, -12
+; RV32IA-NEXT: .cfi_offset s2, -16
+; RV32IA-NEXT: mv s0, a0
+; RV32IA-NEXT: lw a5, 4(a0)
+; RV32IA-NEXT: lw a4, 0(a0)
+; RV32IA-NEXT: mv s1, a2
+; RV32IA-NEXT: mv s2, a1
+; RV32IA-NEXT: j .LBB7_2
+; RV32IA-NEXT: .LBB7_1: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB7_2 Depth=1
+; RV32IA-NEXT: xori a1, a1, 1
+; RV32IA-NEXT: sub a2, a5, s1
+; RV32IA-NEXT: sub a2, a2, a0
+; RV32IA-NEXT: sub a0, a4, s2
+; RV32IA-NEXT: neg a1, a1
+; RV32IA-NEXT: and a3, a1, a2
+; RV32IA-NEXT: and a2, a1, a0
+; RV32IA-NEXT: sw a4, 8(sp)
+; RV32IA-NEXT: sw a5, 12(sp)
+; RV32IA-NEXT: addi a1, sp, 8
+; RV32IA-NEXT: li a4, 5
+; RV32IA-NEXT: li a5, 5
+; RV32IA-NEXT: mv a0, s0
+; RV32IA-NEXT: call __atomic_compare_exchange_8
+; RV32IA-NEXT: lw a5, 12(sp)
+; RV32IA-NEXT: lw a4, 8(sp)
+; RV32IA-NEXT: bnez a0, .LBB7_4
+; RV32IA-NEXT: .LBB7_2: # %atomicrmw.start
+; RV32IA-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT: sltu a0, a4, s2
+; RV32IA-NEXT: mv a1, a0
+; RV32IA-NEXT: beq a5, s1, .LBB7_1
+; RV32IA-NEXT: # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT: # in Loop: Header=BB7_2 Depth=1
+; RV32IA-NEXT: sltu a1, a5, s1
+; RV32IA-NEXT: j .LBB7_1
+; RV32IA-NEXT: .LBB7_4: # %atomicrmw.end
+; RV32IA-NEXT: mv a0, a4
+; RV32IA-NEXT: mv a1, a5
+; RV32IA-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: addi sp, sp, 32
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomicrmw_sub_clamp_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -32
+; RV64I-NEXT: .cfi_def_cfa_offset 32
+; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: mv s0, a0
+; RV64I-NEXT: ld a3, 0(a0)
+; RV64I-NEXT: mv s1, a1
+; RV64I-NEXT: .LBB7_1: # %atomicrmw.start
+; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT: sltu a0, a3, s1
+; RV64I-NEXT: sub a1, a3, s1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a2, a0, a1
+; RV64I-NEXT: sd a3, 0(sp)
+; RV64I-NEXT: mv a1, sp
+; RV64I-NEXT: li a3, 5
+; RV64I-NEXT: li a4, 5
+; RV64I-NEXT: mv a0, s0
+; RV64I-NEXT: call __atomic_compare_exchange_8
+; RV64I-NEXT: ld a3, 0(sp)
+; RV64I-NEXT: beqz a0, .LBB7_1
+; RV64I-NEXT: # %bb.2: # %atomicrmw.end
+; RV64I-NEXT: mv a0, a3
+; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 32
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_clamp_i64:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: ld a2, 0(a0)
+; RV64IA-NEXT: .LBB7_1: # %atomicrmw.start
+; RV64IA-NEXT: # =>This Loop Header: Depth=1
+; RV64IA-NEXT: # Child Loop BB7_3 Depth 2
+; RV64IA-NEXT: mv a3, a2
+; RV64IA-NEXT: sltu a2, a2, a1
+; RV64IA-NEXT: sub a4, a3, a1
+; RV64IA-NEXT: addi a2, a2, -1
+; RV64IA-NEXT: and a4, a2, a4
+; RV64IA-NEXT: .LBB7_3: # %atomicrmw.start
+; RV64IA-NEXT: # Parent Loop BB7_1 Depth=1
+; RV64IA-NEXT: # => This Inner Loop Header: Depth=2
+; RV64IA-NEXT: lr.d.aqrl a2, (a0)
+; RV64IA-NEXT: bne a2, a3, .LBB7_1
+; RV64IA-NEXT: # %bb.4: # %atomicrmw.start
+; RV64IA-NEXT: # in Loop: Header=BB7_3 Depth=2
+; RV64IA-NEXT: sc.d.rl a5, a4, (a0)
+; RV64IA-NEXT: bnez a5, .LBB7_3
+; RV64IA-NEXT: # %bb.5: # %atomicrmw.start
+; RV64IA-NEXT: # %bb.2: # %atomicrmw.end
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
+ %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
diff --git a/llvm/test/CodeGen/SPARC/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/SPARC/atomicrmw-cond-sub-clamp.ll
new file mode 100644
index 0000000000000..ec8ad74d69479
--- /dev/null
+++ b/llvm/test/CodeGen/SPARC/atomicrmw-cond-sub-clamp.ll
@@ -0,0 +1,326 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=sparc -mcpu=v9 < %s | FileCheck %s
+
+define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i8:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ! %bb.0:
+; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: and %o0, -4, %o2
+; CHECK-NEXT: mov 3, %o3
+; CHECK-NEXT: andn %o3, %o0, %o0
+; CHECK-NEXT: sll %o0, 3, %o0
+; CHECK-NEXT: mov 255, %o3
+; CHECK-NEXT: ld [%o2], %o5
+; CHECK-NEXT: sll %o3, %o0, %o3
+; CHECK-NEXT: xor %o3, -1, %o3
+; CHECK-NEXT: and %o1, 255, %o4
+; CHECK-NEXT: .LBB0_1: ! %atomicrmw.start
+; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: mov %o5, %g2
+; CHECK-NEXT: srl %o5, %o0, %o5
+; CHECK-NEXT: and %o5, 255, %g3
+; CHECK-NEXT: sub %o5, %o1, %o5
+; CHECK-NEXT: cmp %g3, %o4
+; CHECK-NEXT: mov %o1, %g3
+; CHECK-NEXT: movcc %icc, %o5, %g3
+; CHECK-NEXT: and %g3, 255, %o5
+; CHECK-NEXT: sll %o5, %o0, %o5
+; CHECK-NEXT: and %g2, %o3, %g3
+; CHECK-NEXT: or %g3, %o5, %o5
+; CHECK-NEXT: cas [%o2], %g2, %o5
+; CHECK-NEXT: mov %g0, %g3
+; CHECK-NEXT: cmp %o5, %g2
+; CHECK-NEXT: move %icc, 1, %g3
+; CHECK-NEXT: cmp %g3, 1
+; CHECK-NEXT: bne %icc, .LBB0_1
+; CHECK-NEXT: nop
+; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
+; CHECK-NEXT: srl %o5, %o0, %o0
+; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: retl
+; CHECK-NEXT: nop
+ %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i16:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ! %bb.0:
+; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: and %o0, -4, %o2
+; CHECK-NEXT: and %o0, 3, %o0
+; CHECK-NEXT: xor %o0, 2, %o0
+; CHECK-NEXT: sll %o0, 3, %o0
+; CHECK-NEXT: sethi 63, %o3
+; CHECK-NEXT: or %o3, 1023, %o3
+; CHECK-NEXT: ld [%o2], %g2
+; CHECK-NEXT: sll %o3, %o0, %o4
+; CHECK-NEXT: xor %o4, -1, %o4
+; CHECK-NEXT: and %o1, %o3, %o5
+; CHECK-NEXT: .LBB1_1: ! %atomicrmw.start
+; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: mov %g2, %g3
+; CHECK-NEXT: srl %g2, %o0, %g2
+; CHECK-NEXT: and %g2, %o3, %g4
+; CHECK-NEXT: sub %g2, %o1, %g2
+; CHECK-NEXT: cmp %g4, %o5
+; CHECK-NEXT: mov %o1, %g4
+; CHECK-NEXT: movcc %icc, %g2, %g4
+; CHECK-NEXT: and %g4, %o3, %g2
+; CHECK-NEXT: sll %g2, %o0, %g2
+; CHECK-NEXT: and %g3, %o4, %g4
+; CHECK-NEXT: or %g4, %g2, %g2
+; CHECK-NEXT: cas [%o2], %g3, %g2
+; CHECK-NEXT: mov %g0, %g4
+; CHECK-NEXT: cmp %g2, %g3
+; CHECK-NEXT: move %icc, 1, %g4
+; CHECK-NEXT: cmp %g4, 1
+; CHECK-NEXT: bne %icc, .LBB1_1
+; CHECK-NEXT: nop
+; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
+; CHECK-NEXT: srl %g2, %o0, %o0
+; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: retl
+; CHECK-NEXT: nop
+ %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i32:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ! %bb.0:
+; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: ld [%o0], %o2
+; CHECK-NEXT: .LBB2_1: ! %atomicrmw.start
+; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: mov %o2, %o3
+; CHECK-NEXT: sub %o2, %o1, %o4
+; CHECK-NEXT: cmp %o2, %o1
+; CHECK-NEXT: mov %o1, %o2
+; CHECK-NEXT: movcc %icc, %o4, %o2
+; CHECK-NEXT: cas [%o0], %o3, %o2
+; CHECK-NEXT: mov %g0, %o4
+; CHECK-NEXT: cmp %o2, %o3
+; CHECK-NEXT: move %icc, 1, %o4
+; CHECK-NEXT: cmp %o4, 1
+; CHECK-NEXT: bne %icc, .LBB2_1
+; CHECK-NEXT: nop
+; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
+; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: retl
+; CHECK-NEXT: mov %o2, %o0
+ %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i64:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ! %bb.0:
+; CHECK-NEXT: save %sp, -104, %sp
+; CHECK-NEXT: .cfi_def_cfa_register %fp
+; CHECK-NEXT: .cfi_window_save
+; CHECK-NEXT: .cfi_register %o7, %i7
+; CHECK-NEXT: ldd [%i0], %g2
+; CHECK-NEXT: add %fp, -8, %i3
+; CHECK-NEXT: mov 5, %i4
+; CHECK-NEXT: .LBB3_1: ! %atomicrmw.start
+; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: mov %g0, %i5
+; CHECK-NEXT: mov %g0, %g4
+; CHECK-NEXT: cmp %g2, %i1
+; CHECK-NEXT: movcc %icc, 1, %i5
+; CHECK-NEXT: cmp %g3, %i2
+; CHECK-NEXT: movcc %icc, 1, %g4
+; CHECK-NEXT: cmp %g2, %i1
+; CHECK-NEXT: move %icc, %g4, %i5
+; CHECK-NEXT: subcc %g3, %i2, %g4
+; CHECK-NEXT: subxcc %g2, %i1, %l0
+; CHECK-NEXT: cmp %i5, 0
+; CHECK-NEXT: mov %i1, %o2
+; CHECK-NEXT: movne %icc, %l0, %o2
+; CHECK-NEXT: mov %i2, %o3
+; CHECK-NEXT: movne %icc, %g4, %o3
+; CHECK-NEXT: std %g2, [%fp+-8]
+; CHECK-NEXT: mov %i0, %o0
+; CHECK-NEXT: mov %i3, %o1
+; CHECK-NEXT: mov %i4, %o4
+; CHECK-NEXT: call __atomic_compare_exchange_8
+; CHECK-NEXT: mov %i4, %o5
+; CHECK-NEXT: cmp %o0, 0
+; CHECK-NEXT: be %icc, .LBB3_1
+; CHECK-NEXT: ldd [%fp+-8], %g2
+; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
+; CHECK-NEXT: mov %g2, %i0
+; CHECK-NEXT: ret
+; CHECK-NEXT: restore %g0, %g3, %o1
+ %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
+
+define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i8:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ! %bb.0:
+; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: and %o0, -4, %o2
+; CHECK-NEXT: mov 3, %o3
+; CHECK-NEXT: andn %o3, %o0, %o0
+; CHECK-NEXT: sll %o0, 3, %o0
+; CHECK-NEXT: mov 255, %o3
+; CHECK-NEXT: ld [%o2], %o5
+; CHECK-NEXT: sll %o3, %o0, %o3
+; CHECK-NEXT: xor %o3, -1, %o3
+; CHECK-NEXT: and %o1, 255, %o4
+; CHECK-NEXT: .LBB4_1: ! %atomicrmw.start
+; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: mov %g0, %g2
+; CHECK-NEXT: mov %o5, %g3
+; CHECK-NEXT: srl %o5, %o0, %o5
+; CHECK-NEXT: and %o5, 255, %g4
+; CHECK-NEXT: cmp %g4, %o4
+; CHECK-NEXT: sub %o5, %o1, %o5
+; CHECK-NEXT: movcc %icc, %o5, %g2
+; CHECK-NEXT: and %g2, 255, %o5
+; CHECK-NEXT: sll %o5, %o0, %o5
+; CHECK-NEXT: and %g3, %o3, %g2
+; CHECK-NEXT: or %g2, %o5, %o5
+; CHECK-NEXT: cas [%o2], %g3, %o5
+; CHECK-NEXT: mov %g0, %g2
+; CHECK-NEXT: cmp %o5, %g3
+; CHECK-NEXT: move %icc, 1, %g2
+; CHECK-NEXT: cmp %g2, 1
+; CHECK-NEXT: bne %icc, .LBB4_1
+; CHECK-NEXT: nop
+; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
+; CHECK-NEXT: srl %o5, %o0, %o0
+; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: retl
+; CHECK-NEXT: nop
+ %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i16:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ! %bb.0:
+; CHECK-NEXT: save %sp, -96, %sp
+; CHECK-NEXT: .cfi_def_cfa_register %fp
+; CHECK-NEXT: .cfi_window_save
+; CHECK-NEXT: .cfi_register %o7, %i7
+; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: and %i0, -4, %i2
+; CHECK-NEXT: and %i0, 3, %i0
+; CHECK-NEXT: xor %i0, 2, %i0
+; CHECK-NEXT: sll %i0, 3, %i0
+; CHECK-NEXT: sethi 63, %i3
+; CHECK-NEXT: or %i3, 1023, %i3
+; CHECK-NEXT: ld [%i2], %g2
+; CHECK-NEXT: sll %i3, %i0, %i4
+; CHECK-NEXT: xor %i4, -1, %i4
+; CHECK-NEXT: and %i1, %i3, %i5
+; CHECK-NEXT: .LBB5_1: ! %atomicrmw.start
+; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: mov %g0, %g3
+; CHECK-NEXT: mov %g2, %g4
+; CHECK-NEXT: srl %g2, %i0, %g2
+; CHECK-NEXT: and %g2, %i3, %l0
+; CHECK-NEXT: cmp %l0, %i5
+; CHECK-NEXT: sub %g2, %i1, %g2
+; CHECK-NEXT: movcc %icc, %g2, %g3
+; CHECK-NEXT: and %g3, %i3, %g2
+; CHECK-NEXT: sll %g2, %i0, %g2
+; CHECK-NEXT: and %g4, %i4, %g3
+; CHECK-NEXT: or %g3, %g2, %g2
+; CHECK-NEXT: cas [%i2], %g4, %g2
+; CHECK-NEXT: mov %g0, %g3
+; CHECK-NEXT: cmp %g2, %g4
+; CHECK-NEXT: move %icc, 1, %g3
+; CHECK-NEXT: cmp %g3, 1
+; CHECK-NEXT: bne %icc, .LBB5_1
+; CHECK-NEXT: nop
+; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
+; CHECK-NEXT: srl %g2, %i0, %i0
+; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: ret
+; CHECK-NEXT: restore
+ %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i32:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ! %bb.0:
+; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: ld [%o0], %o2
+; CHECK-NEXT: .LBB6_1: ! %atomicrmw.start
+; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: mov %o2, %o3
+; CHECK-NEXT: sub %o2, %o1, %o4
+; CHECK-NEXT: cmp %o2, %o1
+; CHECK-NEXT: mov %g0, %o2
+; CHECK-NEXT: movcc %icc, %o4, %o2
+; CHECK-NEXT: cas [%o0], %o3, %o2
+; CHECK-NEXT: mov %g0, %o4
+; CHECK-NEXT: cmp %o2, %o3
+; CHECK-NEXT: move %icc, 1, %o4
+; CHECK-NEXT: cmp %o4, 1
+; CHECK-NEXT: bne %icc, .LBB6_1
+; CHECK-NEXT: nop
+; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
+; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: retl
+; CHECK-NEXT: mov %o2, %o0
+ %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i64:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ! %bb.0:
+; CHECK-NEXT: save %sp, -104, %sp
+; CHECK-NEXT: .cfi_def_cfa_register %fp
+; CHECK-NEXT: .cfi_window_save
+; CHECK-NEXT: .cfi_register %o7, %i7
+; CHECK-NEXT: ldd [%i0], %g2
+; CHECK-NEXT: add %fp, -8, %i3
+; CHECK-NEXT: mov 5, %i4
+; CHECK-NEXT: .LBB7_1: ! %atomicrmw.start
+; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: mov %g0, %i5
+; CHECK-NEXT: mov %g0, %g4
+; CHECK-NEXT: mov %g0, %o2
+; CHECK-NEXT: mov %g0, %o3
+; CHECK-NEXT: cmp %g2, %i1
+; CHECK-NEXT: movcc %icc, 1, %i5
+; CHECK-NEXT: cmp %g3, %i2
+; CHECK-NEXT: movcc %icc, 1, %g4
+; CHECK-NEXT: cmp %g2, %i1
+; CHECK-NEXT: move %icc, %g4, %i5
+; CHECK-NEXT: subcc %g3, %i2, %g4
+; CHECK-NEXT: subxcc %g2, %i1, %l0
+; CHECK-NEXT: cmp %i5, 0
+; CHECK-NEXT: movne %icc, %l0, %o2
+; CHECK-NEXT: movne %icc, %g4, %o3
+; CHECK-NEXT: std %g2, [%fp+-8]
+; CHECK-NEXT: mov %i0, %o0
+; CHECK-NEXT: mov %i3, %o1
+; CHECK-NEXT: mov %i4, %o4
+; CHECK-NEXT: call __atomic_compare_exchange_8
+; CHECK-NEXT: mov %i4, %o5
+; CHECK-NEXT: cmp %o0, 0
+; CHECK-NEXT: be %icc, .LBB7_1
+; CHECK-NEXT: ldd [%fp+-8], %g2
+; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
+; CHECK-NEXT: mov %g2, %i0
+; CHECK-NEXT: ret
+; CHECK-NEXT: restore %g0, %g3, %o1
+ %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
diff --git a/llvm/test/CodeGen/VE/Scalar/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/VE/Scalar/atomicrmw-cond-sub-clamp.ll
new file mode 100644
index 0000000000000..85774dde7bb39
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Scalar/atomicrmw-cond-sub-clamp.ll
@@ -0,0 +1,240 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=ve-unknown-unknown < %s | FileCheck %s
+
+define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: and %s2, -4, %s0
+; CHECK-NEXT: and %s0, 3, %s0
+; CHECK-NEXT: sla.w.sx %s0, %s0, 3
+; CHECK-NEXT: sla.w.sx %s3, (56)0, %s0
+; CHECK-NEXT: ldl.sx %s5, (, %s2)
+; CHECK-NEXT: xor %s3, -1, %s3
+; CHECK-NEXT: and %s3, %s3, (32)0
+; CHECK-NEXT: and %s4, %s1, (56)0
+; CHECK-NEXT: .LBB0_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or %s6, 0, %s5
+; CHECK-NEXT: and %s5, %s6, (32)0
+; CHECK-NEXT: srl %s5, %s5, %s0
+; CHECK-NEXT: and %s7, %s5, (56)0
+; CHECK-NEXT: subs.w.sx %s5, %s5, %s1
+; CHECK-NEXT: cmpu.w %s7, %s7, %s4
+; CHECK-NEXT: or %s34, 0, %s1
+; CHECK-NEXT: cmov.w.ge %s34, %s5, %s7
+; CHECK-NEXT: and %s5, %s34, (56)0
+; CHECK-NEXT: sla.w.sx %s5, %s5, %s0
+; CHECK-NEXT: and %s7, %s6, %s3
+; CHECK-NEXT: or %s5, %s7, %s5
+; CHECK-NEXT: cas.w %s5, (%s2), %s6
+; CHECK-NEXT: brne.w %s5, %s6, .LBB0_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: and %s1, %s5, (32)0
+; CHECK-NEXT: srl %s0, %s1, %s0
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: b.l.t (, %s10)
+ %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: and %s2, -4, %s0
+; CHECK-NEXT: and %s0, 3, %s0
+; CHECK-NEXT: sla.w.sx %s0, %s0, 3
+; CHECK-NEXT: sla.w.sx %s3, (48)0, %s0
+; CHECK-NEXT: ldl.sx %s5, (, %s2)
+; CHECK-NEXT: xor %s3, -1, %s3
+; CHECK-NEXT: and %s3, %s3, (32)0
+; CHECK-NEXT: and %s4, %s1, (48)0
+; CHECK-NEXT: .LBB1_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or %s6, 0, %s5
+; CHECK-NEXT: and %s5, %s6, (32)0
+; CHECK-NEXT: srl %s5, %s5, %s0
+; CHECK-NEXT: and %s7, %s5, (48)0
+; CHECK-NEXT: subs.w.sx %s5, %s5, %s1
+; CHECK-NEXT: cmpu.w %s7, %s7, %s4
+; CHECK-NEXT: or %s34, 0, %s1
+; CHECK-NEXT: cmov.w.ge %s34, %s5, %s7
+; CHECK-NEXT: and %s5, %s34, (48)0
+; CHECK-NEXT: sla.w.sx %s5, %s5, %s0
+; CHECK-NEXT: and %s7, %s6, %s3
+; CHECK-NEXT: or %s5, %s7, %s5
+; CHECK-NEXT: cas.w %s5, (%s2), %s6
+; CHECK-NEXT: brne.w %s5, %s6, .LBB1_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: and %s1, %s5, (32)0
+; CHECK-NEXT: srl %s0, %s1, %s0
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: b.l.t (, %s10)
+ %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: ldl.sx %s2, (, %s0)
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: .LBB2_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or %s3, 0, %s2
+; CHECK-NEXT: subs.w.sx %s4, %s2, %s1
+; CHECK-NEXT: cmpu.w %s5, %s2, %s1
+; CHECK-NEXT: or %s2, 0, %s1
+; CHECK-NEXT: cmov.w.ge %s2, %s4, %s5
+; CHECK-NEXT: cas.w %s2, (%s0), %s3
+; CHECK-NEXT: brne.w %s2, %s3, .LBB2_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: or %s0, 0, %s2
+; CHECK-NEXT: b.l.t (, %s10)
+ %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: ld %s2, (, %s0)
+; CHECK-NEXT: .LBB3_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or %s3, 0, %s2
+; CHECK-NEXT: subs.l %s4, %s2, %s1
+; CHECK-NEXT: cmpu.l %s5, %s2, %s1
+; CHECK-NEXT: or %s2, 0, %s1
+; CHECK-NEXT: cmov.l.ge %s2, %s4, %s5
+; CHECK-NEXT: cas.l %s2, (%s0), %s3
+; CHECK-NEXT: brne.l %s2, %s3, .LBB3_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: or %s0, 0, %s2
+; CHECK-NEXT: b.l.t (, %s10)
+ %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
+
+define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: and %s2, -4, %s0
+; CHECK-NEXT: and %s0, 3, %s0
+; CHECK-NEXT: sla.w.sx %s0, %s0, 3
+; CHECK-NEXT: sla.w.sx %s3, (56)0, %s0
+; CHECK-NEXT: ldl.sx %s5, (, %s2)
+; CHECK-NEXT: xor %s3, -1, %s3
+; CHECK-NEXT: and %s3, %s3, (32)0
+; CHECK-NEXT: and %s4, %s1, (56)0
+; CHECK-NEXT: .LBB4_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or %s6, 0, %s5
+; CHECK-NEXT: and %s5, %s6, (32)0
+; CHECK-NEXT: srl %s5, %s5, %s0
+; CHECK-NEXT: and %s7, %s5, (56)0
+; CHECK-NEXT: subs.w.sx %s5, %s5, %s1
+; CHECK-NEXT: cmpu.w %s7, %s7, %s4
+; CHECK-NEXT: cmov.w.lt %s5, (0)1, %s7
+; CHECK-NEXT: and %s5, %s5, (56)0
+; CHECK-NEXT: sla.w.sx %s5, %s5, %s0
+; CHECK-NEXT: and %s7, %s6, %s3
+; CHECK-NEXT: or %s5, %s7, %s5
+; CHECK-NEXT: cas.w %s5, (%s2), %s6
+; CHECK-NEXT: brne.w %s5, %s6, .LBB4_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: and %s1, %s5, (32)0
+; CHECK-NEXT: srl %s0, %s1, %s0
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: b.l.t (, %s10)
+ %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: and %s2, -4, %s0
+; CHECK-NEXT: and %s0, 3, %s0
+; CHECK-NEXT: sla.w.sx %s0, %s0, 3
+; CHECK-NEXT: sla.w.sx %s3, (48)0, %s0
+; CHECK-NEXT: ldl.sx %s5, (, %s2)
+; CHECK-NEXT: xor %s3, -1, %s3
+; CHECK-NEXT: and %s3, %s3, (32)0
+; CHECK-NEXT: and %s4, %s1, (48)0
+; CHECK-NEXT: .LBB5_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or %s6, 0, %s5
+; CHECK-NEXT: and %s5, %s6, (32)0
+; CHECK-NEXT: srl %s5, %s5, %s0
+; CHECK-NEXT: and %s7, %s5, (48)0
+; CHECK-NEXT: subs.w.sx %s5, %s5, %s1
+; CHECK-NEXT: cmpu.w %s7, %s7, %s4
+; CHECK-NEXT: cmov.w.lt %s5, (0)1, %s7
+; CHECK-NEXT: and %s5, %s5, (48)0
+; CHECK-NEXT: sla.w.sx %s5, %s5, %s0
+; CHECK-NEXT: and %s7, %s6, %s3
+; CHECK-NEXT: or %s5, %s7, %s5
+; CHECK-NEXT: cas.w %s5, (%s2), %s6
+; CHECK-NEXT: brne.w %s5, %s6, .LBB5_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: and %s1, %s5, (32)0
+; CHECK-NEXT: srl %s0, %s1, %s0
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: b.l.t (, %s10)
+ %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: ldl.sx %s2, (, %s0)
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: .LBB6_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or %s3, 0, %s2
+; CHECK-NEXT: subs.w.sx %s2, %s2, %s1
+; CHECK-NEXT: cmpu.w %s4, %s3, %s1
+; CHECK-NEXT: cmov.w.lt %s2, (0)1, %s4
+; CHECK-NEXT: cas.w %s2, (%s0), %s3
+; CHECK-NEXT: brne.w %s2, %s3, .LBB6_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: or %s0, 0, %s2
+; CHECK-NEXT: b.l.t (, %s10)
+ %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: ld %s2, (, %s0)
+; CHECK-NEXT: .LBB7_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or %s3, 0, %s2
+; CHECK-NEXT: subs.l %s2, %s2, %s1
+; CHECK-NEXT: cmpu.l %s4, %s3, %s1
+; CHECK-NEXT: cmov.l.lt %s2, (0)1, %s4
+; CHECK-NEXT: cas.l %s2, (%s0), %s3
+; CHECK-NEXT: brne.l %s2, %s3, .LBB7_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: fencem 3
+; CHECK-NEXT: or %s0, 0, %s2
+; CHECK-NEXT: b.l.t (, %s10)
+ %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
diff --git a/llvm/test/CodeGen/WebAssembly/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/WebAssembly/atomicrmw-cond-sub-clamp.ll
new file mode 100644
index 0000000000000..e634b332c588b
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/atomicrmw-cond-sub-clamp.ll
@@ -0,0 +1,355 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=wasm32-unknown-unknown < %s | FileCheck -check-prefix=WASM32 %s
+; RUN: llc -mtriple=wasm64-unknown-unknown < %s | FileCheck -check-prefix=WASM64 %s
+
+define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
+; WASM32-LABEL: atomicrmw_cond_sub_i8:
+; WASM32: .functype atomicrmw_cond_sub_i8 (i32, i32) -> (i32)
+; WASM32-NEXT: .local i32
+; WASM32-NEXT: # %bb.0:
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: i32.load8_u 0
+; WASM32-NEXT: local.tee 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i32.sub
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i32.const 255
+; WASM32-NEXT: i32.and
+; WASM32-NEXT: i32.ge_u
+; WASM32-NEXT: i32.select
+; WASM32-NEXT: i32.store8 0
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: # fallthrough-return
+;
+; WASM64-LABEL: atomicrmw_cond_sub_i8:
+; WASM64: .functype atomicrmw_cond_sub_i8 (i64, i32) -> (i32)
+; WASM64-NEXT: .local i32
+; WASM64-NEXT: # %bb.0:
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: i32.load8_u 0
+; WASM64-NEXT: local.tee 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i32.sub
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i32.const 255
+; WASM64-NEXT: i32.and
+; WASM64-NEXT: i32.ge_u
+; WASM64-NEXT: i32.select
+; WASM64-NEXT: i32.store8 0
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: # fallthrough-return
+ %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
+; WASM32-LABEL: atomicrmw_cond_sub_i16:
+; WASM32: .functype atomicrmw_cond_sub_i16 (i32, i32) -> (i32)
+; WASM32-NEXT: .local i32
+; WASM32-NEXT: # %bb.0:
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: i32.load16_u 0
+; WASM32-NEXT: local.tee 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i32.sub
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i32.const 65535
+; WASM32-NEXT: i32.and
+; WASM32-NEXT: i32.ge_u
+; WASM32-NEXT: i32.select
+; WASM32-NEXT: i32.store16 0
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: # fallthrough-return
+;
+; WASM64-LABEL: atomicrmw_cond_sub_i16:
+; WASM64: .functype atomicrmw_cond_sub_i16 (i64, i32) -> (i32)
+; WASM64-NEXT: .local i32
+; WASM64-NEXT: # %bb.0:
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: i32.load16_u 0
+; WASM64-NEXT: local.tee 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i32.sub
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i32.const 65535
+; WASM64-NEXT: i32.and
+; WASM64-NEXT: i32.ge_u
+; WASM64-NEXT: i32.select
+; WASM64-NEXT: i32.store16 0
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: # fallthrough-return
+ %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
+; WASM32-LABEL: atomicrmw_cond_sub_i32:
+; WASM32: .functype atomicrmw_cond_sub_i32 (i32, i32) -> (i32)
+; WASM32-NEXT: .local i32
+; WASM32-NEXT: # %bb.0:
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: i32.load 0
+; WASM32-NEXT: local.tee 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i32.sub
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i32.ge_u
+; WASM32-NEXT: i32.select
+; WASM32-NEXT: i32.store 0
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: # fallthrough-return
+;
+; WASM64-LABEL: atomicrmw_cond_sub_i32:
+; WASM64: .functype atomicrmw_cond_sub_i32 (i64, i32) -> (i32)
+; WASM64-NEXT: .local i32
+; WASM64-NEXT: # %bb.0:
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: i32.load 0
+; WASM64-NEXT: local.tee 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i32.sub
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i32.ge_u
+; WASM64-NEXT: i32.select
+; WASM64-NEXT: i32.store 0
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: # fallthrough-return
+ %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
+; WASM32-LABEL: atomicrmw_cond_sub_i64:
+; WASM32: .functype atomicrmw_cond_sub_i64 (i32, i64) -> (i64)
+; WASM32-NEXT: .local i64
+; WASM32-NEXT: # %bb.0:
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: i64.load 0
+; WASM32-NEXT: local.tee 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i64.sub
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i64.ge_u
+; WASM32-NEXT: i64.select
+; WASM32-NEXT: i64.store 0
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: # fallthrough-return
+;
+; WASM64-LABEL: atomicrmw_cond_sub_i64:
+; WASM64: .functype atomicrmw_cond_sub_i64 (i64, i64) -> (i64)
+; WASM64-NEXT: .local i64
+; WASM64-NEXT: # %bb.0:
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: i64.load 0
+; WASM64-NEXT: local.tee 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i64.sub
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i64.ge_u
+; WASM64-NEXT: i64.select
+; WASM64-NEXT: i64.store 0
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: # fallthrough-return
+ %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
+
+define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
+; WASM32-LABEL: atomicrmw_sub_clamp_i8:
+; WASM32: .functype atomicrmw_sub_clamp_i8 (i32, i32) -> (i32)
+; WASM32-NEXT: .local i32
+; WASM32-NEXT: # %bb.0:
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: i32.load8_u 0
+; WASM32-NEXT: local.tee 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i32.sub
+; WASM32-NEXT: i32.const 0
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i32.const 255
+; WASM32-NEXT: i32.and
+; WASM32-NEXT: i32.ge_u
+; WASM32-NEXT: i32.select
+; WASM32-NEXT: i32.store8 0
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: # fallthrough-return
+;
+; WASM64-LABEL: atomicrmw_sub_clamp_i8:
+; WASM64: .functype atomicrmw_sub_clamp_i8 (i64, i32) -> (i32)
+; WASM64-NEXT: .local i32
+; WASM64-NEXT: # %bb.0:
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: i32.load8_u 0
+; WASM64-NEXT: local.tee 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i32.sub
+; WASM64-NEXT: i32.const 0
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i32.const 255
+; WASM64-NEXT: i32.and
+; WASM64-NEXT: i32.ge_u
+; WASM64-NEXT: i32.select
+; WASM64-NEXT: i32.store8 0
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: # fallthrough-return
+ %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
+; WASM32-LABEL: atomicrmw_sub_clamp_i16:
+; WASM32: .functype atomicrmw_sub_clamp_i16 (i32, i32) -> (i32)
+; WASM32-NEXT: .local i32
+; WASM32-NEXT: # %bb.0:
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: i32.load16_u 0
+; WASM32-NEXT: local.tee 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i32.sub
+; WASM32-NEXT: i32.const 0
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i32.const 65535
+; WASM32-NEXT: i32.and
+; WASM32-NEXT: i32.ge_u
+; WASM32-NEXT: i32.select
+; WASM32-NEXT: i32.store16 0
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: # fallthrough-return
+;
+; WASM64-LABEL: atomicrmw_sub_clamp_i16:
+; WASM64: .functype atomicrmw_sub_clamp_i16 (i64, i32) -> (i32)
+; WASM64-NEXT: .local i32
+; WASM64-NEXT: # %bb.0:
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: i32.load16_u 0
+; WASM64-NEXT: local.tee 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i32.sub
+; WASM64-NEXT: i32.const 0
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i32.const 65535
+; WASM64-NEXT: i32.and
+; WASM64-NEXT: i32.ge_u
+; WASM64-NEXT: i32.select
+; WASM64-NEXT: i32.store16 0
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: # fallthrough-return
+ %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
+; WASM32-LABEL: atomicrmw_sub_clamp_i32:
+; WASM32: .functype atomicrmw_sub_clamp_i32 (i32, i32) -> (i32)
+; WASM32-NEXT: .local i32
+; WASM32-NEXT: # %bb.0:
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: i32.load 0
+; WASM32-NEXT: local.tee 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i32.sub
+; WASM32-NEXT: i32.const 0
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i32.ge_u
+; WASM32-NEXT: i32.select
+; WASM32-NEXT: i32.store 0
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: # fallthrough-return
+;
+; WASM64-LABEL: atomicrmw_sub_clamp_i32:
+; WASM64: .functype atomicrmw_sub_clamp_i32 (i64, i32) -> (i32)
+; WASM64-NEXT: .local i32
+; WASM64-NEXT: # %bb.0:
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: i32.load 0
+; WASM64-NEXT: local.tee 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i32.sub
+; WASM64-NEXT: i32.const 0
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i32.ge_u
+; WASM64-NEXT: i32.select
+; WASM64-NEXT: i32.store 0
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: # fallthrough-return
+ %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
+; WASM32-LABEL: atomicrmw_sub_clamp_i64:
+; WASM32: .functype atomicrmw_sub_clamp_i64 (i32, i64) -> (i64)
+; WASM32-NEXT: .local i64
+; WASM32-NEXT: # %bb.0:
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: i64.load 0
+; WASM32-NEXT: local.tee 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i64.sub
+; WASM32-NEXT: i64.const 0
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: i64.ge_u
+; WASM32-NEXT: i64.select
+; WASM32-NEXT: i64.store 0
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: # fallthrough-return
+;
+; WASM64-LABEL: atomicrmw_sub_clamp_i64:
+; WASM64: .functype atomicrmw_sub_clamp_i64 (i64, i64) -> (i64)
+; WASM64-NEXT: .local i64
+; WASM64-NEXT: # %bb.0:
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: i64.load 0
+; WASM64-NEXT: local.tee 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i64.sub
+; WASM64-NEXT: i64.const 0
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: i64.ge_u
+; WASM64-NEXT: i64.select
+; WASM64-NEXT: i64.store 0
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: # fallthrough-return
+ %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
diff --git a/llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll
new file mode 100644
index 0000000000000..5c49107e0fdef
--- /dev/null
+++ b/llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll
@@ -0,0 +1,153 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple x86_64-pc-linux < %s | FileCheck %s
+
+define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movzbl (%rdi), %eax
+; CHECK-NEXT: movzbl %sil, %ecx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movl %eax, %edx
+; CHECK-NEXT: subb %cl, %dl
+; CHECK-NEXT: movzbl %dl, %edx
+; CHECK-NEXT: cmovbl %ecx, %edx
+; CHECK-NEXT: lock cmpxchgb %dl, (%rdi)
+; CHECK-NEXT: jne .LBB0_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: retq
+ %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movzwl (%rdi), %eax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB1_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: subw %si, %cx
+; CHECK-NEXT: cmovbl %esi, %ecx
+; CHECK-NEXT: lock cmpxchgw %cx, (%rdi)
+; CHECK-NEXT: jne .LBB1_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: retq
+ %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB2_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: subl %esi, %ecx
+; CHECK-NEXT: cmovbl %esi, %ecx
+; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
+; CHECK-NEXT: jne .LBB2_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: retq
+ %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_cond_sub_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB3_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movq %rax, %rcx
+; CHECK-NEXT: subq %rsi, %rcx
+; CHECK-NEXT: cmovbq %rsi, %rcx
+; CHECK-NEXT: lock cmpxchgq %rcx, (%rdi)
+; CHECK-NEXT: jne .LBB3_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: retq
+ %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
+
+define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movzbl (%rdi), %eax
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB4_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movl %eax, %edx
+; CHECK-NEXT: subb %sil, %dl
+; CHECK-NEXT: movzbl %dl, %edx
+; CHECK-NEXT: cmovbl %ecx, %edx
+; CHECK-NEXT: lock cmpxchgb %dl, (%rdi)
+; CHECK-NEXT: jne .LBB4_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: retq
+ %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ ret i8 %result
+}
+
+define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movzwl (%rdi), %eax
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB5_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movl %eax, %edx
+; CHECK-NEXT: subw %si, %dx
+; CHECK-NEXT: cmovbl %ecx, %edx
+; CHECK-NEXT: lock cmpxchgw %dx, (%rdi)
+; CHECK-NEXT: jne .LBB5_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: retq
+ %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ ret i16 %result
+}
+
+define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB6_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movl %eax, %edx
+; CHECK-NEXT: subl %esi, %edx
+; CHECK-NEXT: cmovbl %ecx, %edx
+; CHECK-NEXT: lock cmpxchgl %edx, (%rdi)
+; CHECK-NEXT: jne .LBB6_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: retq
+ %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ ret i32 %result
+}
+
+define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_sub_clamp_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB7_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movq %rax, %rdx
+; CHECK-NEXT: subq %rsi, %rdx
+; CHECK-NEXT: cmovbq %rcx, %rdx
+; CHECK-NEXT: lock cmpxchgq %rdx, (%rdi)
+; CHECK-NEXT: jne .LBB7_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: retq
+ %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ ret i64 %result
+}
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-cxx.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-cxx.td
index bb1acde296429..784b666e068f5 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-cxx.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-cxx.td
@@ -85,13 +85,13 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK: const uint8_t *GenMyCombiner::getMatchTable() const {
// CHECK-NEXT: constexpr static uint8_t MatchTable0[] = {
-// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(94), GIMT_Encode2(194), /*)*//*default:*//*Label 4*/ GIMT_Encode4(464),
-// CHECK-NEXT: /*TargetOpcode::G_STORE*//*Label 0*/ GIMT_Encode4(410), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_SEXT*//*Label 1*/ GIMT_Encode4(428), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_FNEG*//*Label 2*/ GIMT_Encode4(440), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_FABS*//*Label 3*/ GIMT_Encode4(452),
-// CHECK-NEXT: // Label 0: @410
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 5*/ GIMT_Encode4(427), // Rule ID 2 //
+// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(94), GIMT_Encode2(196), /*)*//*default:*//*Label 4*/ GIMT_Encode4(472),
+// CHECK-NEXT: /*TargetOpcode::G_STORE*//*Label 0*/ GIMT_Encode4(418), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_SEXT*//*Label 1*/ GIMT_Encode4(436), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_FNEG*//*Label 2*/ GIMT_Encode4(448), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_FABS*//*Label 3*/ GIMT_Encode4(460),
+// CHECK-NEXT: // Label 0: @418
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 5*/ GIMT_Encode4(435), // Rule ID 2 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule2Enabled),
// CHECK-NEXT: // MIs[0] x
// CHECK-NEXT: // No operand predicates
@@ -101,10 +101,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: GIM_CheckCxxInsnPredicate, /*MI*/0, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_GICombiner1),
// CHECK-NEXT: // Combiner Rule #2: TwoMatchNoApply
// CHECK-NEXT: GIR_EraseRootFromParent_Done,
-// CHECK-NEXT: // Label 5: @427
+// CHECK-NEXT: // Label 5: @435
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 1: @428
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 6*/ GIMT_Encode4(439), // Rule ID 3 //
+// CHECK-NEXT: // Label 1: @436
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 6*/ GIMT_Encode4(447), // Rule ID 3 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule3Enabled),
// CHECK-NEXT: // MIs[0] a
// CHECK-NEXT: // No operand predicates
@@ -112,10 +112,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: // No operand predicates
// CHECK-NEXT: // Combiner Rule #3: NoMatchTwoApply
// CHECK-NEXT: GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner2),
-// CHECK-NEXT: // Label 6: @439
+// CHECK-NEXT: // Label 6: @447
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 2: @440
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 7*/ GIMT_Encode4(451), // Rule ID 1 //
+// CHECK-NEXT: // Label 2: @448
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 7*/ GIMT_Encode4(459), // Rule ID 1 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule1Enabled),
// CHECK-NEXT: // MIs[0] a
// CHECK-NEXT: // No operand predicates
@@ -123,10 +123,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: // No operand predicates
// CHECK-NEXT: // Combiner Rule #1: TwoMatchTwoApply
// CHECK-NEXT: GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner1),
-// CHECK-NEXT: // Label 7: @451
+// CHECK-NEXT: // Label 7: @459
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 3: @452
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 8*/ GIMT_Encode4(463), // Rule ID 0 //
+// CHECK-NEXT: // Label 3: @460
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 8*/ GIMT_Encode4(471), // Rule ID 0 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule0Enabled),
// CHECK-NEXT: // MIs[0] a
// CHECK-NEXT: // No operand predicates
@@ -134,10 +134,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: // No operand predicates
// CHECK-NEXT: // Combiner Rule #0: OneMatchOneApply
// CHECK-NEXT: GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner0),
-// CHECK-NEXT: // Label 8: @463
+// CHECK-NEXT: // Label 8: @471
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 4: @464
+// CHECK-NEXT: // Label 4: @472
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: }; // Size: 465 bytes
+// CHECK-NEXT: }; // Size: 473 bytes
// CHECK-NEXT: return MatchTable0;
// CHECK-NEXT: }
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
index 513a86754d6d9..6bfb6c0b2e677 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
@@ -135,15 +135,15 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// Verify match table.
// CHECK: const uint8_t *GenMyCombiner::getMatchTable() const {
// CHECK-NEXT: constexpr static uint8_t MatchTable0[] = {
-// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(19), GIMT_Encode2(133), /*)*//*default:*//*Label 6*/ GIMT_Encode4(653),
-// CHECK-NEXT: /*TargetOpcode::COPY*//*Label 0*/ GIMT_Encode4(466), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_AND*//*Label 1*/ GIMT_Encode4(502), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_STORE*//*Label 2*/ GIMT_Encode4(549), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_TRUNC*//*Label 3*/ GIMT_Encode4(583), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_SEXT*//*Label 4*/ GIMT_Encode4(606), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_ZEXT*//*Label 5*/ GIMT_Encode4(618),
-// CHECK-NEXT: // Label 0: @466
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 7*/ GIMT_Encode4(490), // Rule ID 4 //
+// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(19), GIMT_Encode2(135), /*)*//*default:*//*Label 6*/ GIMT_Encode4(661),
+// CHECK-NEXT: /*TargetOpcode::COPY*//*Label 0*/ GIMT_Encode4(474), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_AND*//*Label 1*/ GIMT_Encode4(510), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_STORE*//*Label 2*/ GIMT_Encode4(557), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_TRUNC*//*Label 3*/ GIMT_Encode4(591), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_SEXT*//*Label 4*/ GIMT_Encode4(614), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_ZEXT*//*Label 5*/ GIMT_Encode4(626),
+// CHECK-NEXT: // Label 0: @474
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 7*/ GIMT_Encode4(498), // Rule ID 4 //
// CHECK-NEXT: GIM_CheckFeatures, GIMT_Encode2(GIFBS_HasAnswerToEverything),
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule3Enabled),
// CHECK-NEXT: // MIs[0] a
@@ -156,8 +156,8 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: GIM_CheckIsSafeToFold, /*NumInsns*/1,
// CHECK-NEXT: // Combiner Rule #3: InstTest1
// CHECK-NEXT: GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner2),
-// CHECK-NEXT: // Label 7: @490
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 8*/ GIMT_Encode4(501), // Rule ID 3 //
+// CHECK-NEXT: // Label 7: @498
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 8*/ GIMT_Encode4(509), // Rule ID 3 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule2Enabled),
// CHECK-NEXT: // MIs[0] a
// CHECK-NEXT: // No operand predicates
@@ -165,10 +165,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: // No operand predicates
// CHECK-NEXT: // Combiner Rule #2: InstTest0
// CHECK-NEXT: GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner1),
-// CHECK-NEXT: // Label 8: @501
+// CHECK-NEXT: // Label 8: @509
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 1: @502
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 9*/ GIMT_Encode4(548), // Rule ID 6 //
+// CHECK-NEXT: // Label 1: @510
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 9*/ GIMT_Encode4(556), // Rule ID 6 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule5Enabled),
// CHECK-NEXT: GIM_RootCheckType, /*Op*/2, /*Type*/GILLT_s32,
// CHECK-NEXT: // MIs[0] dst
@@ -185,10 +185,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: GIR_RootToRootCopy, /*OpIdx*/0, // dst
// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/1, /*OpIdx*/1, // z
// CHECK-NEXT: GIR_EraseRootFromParent_Done,
-// CHECK-NEXT: // Label 9: @548
+// CHECK-NEXT: // Label 9: @556
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 2: @549
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 10*/ GIMT_Encode4(582), // Rule ID 5 //
+// CHECK-NEXT: // Label 2: @557
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 10*/ GIMT_Encode4(590), // Rule ID 5 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule4Enabled),
// CHECK-NEXT: // MIs[0] tmp
// CHECK-NEXT: GIM_RecordInsnIgnoreCopies, /*DefineMI*/1, /*MI*/0, /*OpIdx*/0, // MIs[1]
@@ -204,29 +204,29 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: GIR_RootToRootCopy, /*OpIdx*/1, // ptr
// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0, /*NumInsns*/2, /*MergeInsnID's*/0, 1,
// CHECK-NEXT: GIR_EraseRootFromParent_Done,
-// CHECK-NEXT: // Label 10: @582
+// CHECK-NEXT: // Label 10: @590
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 3: @583
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 11*/ GIMT_Encode4(594), // Rule ID 0 //
+// CHECK-NEXT: // Label 3: @591
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 11*/ GIMT_Encode4(602), // Rule ID 0 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule0Enabled),
// CHECK-NEXT: // Combiner Rule #0: WipOpcodeTest0; wip_match_opcode 'G_TRUNC'
// CHECK-NEXT: GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner0),
-// CHECK-NEXT: // Label 11: @594
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 12*/ GIMT_Encode4(605), // Rule ID 1 //
+// CHECK-NEXT: // Label 11: @602
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 12*/ GIMT_Encode4(613), // Rule ID 1 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule1Enabled),
// CHECK-NEXT: // Combiner Rule #1: WipOpcodeTest1; wip_match_opcode 'G_TRUNC'
// CHECK-NEXT: GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner0),
-// CHECK-NEXT: // Label 12: @605
+// CHECK-NEXT: // Label 12: @613
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 4: @606
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 13*/ GIMT_Encode4(617), // Rule ID 2 //
+// CHECK-NEXT: // Label 4: @614
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 13*/ GIMT_Encode4(625), // Rule ID 2 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule1Enabled),
// CHECK-NEXT: // Combiner Rule #1: WipOpcodeTest1; wip_match_opcode 'G_SEXT'
// CHECK-NEXT: GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner0),
-// CHECK-NEXT: // Label 13: @617
+// CHECK-NEXT: // Label 13: @625
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 5: @618
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 14*/ GIMT_Encode4(652), // Rule ID 7 //
+// CHECK-NEXT: // Label 5: @626
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 14*/ GIMT_Encode4(660), // Rule ID 7 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule6Enabled),
// CHECK-NEXT: // MIs[0] dst
// CHECK-NEXT: // No operand predicates
@@ -240,10 +240,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: GIR_RootToRootCopy, /*OpIdx*/0, // dst
// CHECK-NEXT: GIR_AddSimpleTempRegister, /*InsnID*/0, /*TempRegID*/0,
// CHECK-NEXT: GIR_EraseRootFromParent_Done,
-// CHECK-NEXT: // Label 14: @652
+// CHECK-NEXT: // Label 14: @660
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 6: @653
+// CHECK-NEXT: // Label 6: @661
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: }; // Size: 654 bytes
+// CHECK-NEXT: }; // Size: 662 bytes
// CHECK-NEXT: return MatchTable0;
// CHECK-NEXT: }
diff --git a/llvm/test/TableGen/GlobalISelEmitter.td b/llvm/test/TableGen/GlobalISelEmitter.td
index 5d5bf92664a79..e24e88993e2f5 100644
--- a/llvm/test/TableGen/GlobalISelEmitter.td
+++ b/llvm/test/TableGen/GlobalISelEmitter.td
@@ -513,7 +513,7 @@ def : Pat<(frag GPR32:$src1, complex:$src2, complex:$src3),
// R00O-NEXT: GIM_Reject,
// R00O: // Label [[DEFAULT_NUM]]: @[[DEFAULT]]
// R00O-NEXT: GIM_Reject,
-// R00O-NEXT: }; // Size: 1804 bytes
+// R00O-NEXT: }; // Size: 1812 bytes
def INSNBOB : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2, GPR32:$src3, GPR32:$src4),
[(set GPR32:$dst,
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
index b8196cfcc3510..44a9790129510 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
@@ -1366,6 +1366,364 @@ define i16 @test_atomicrmw_add_i16_buffer_fat_agent_align4(ptr addrspace(7) %ptr
ret i16 %res
}
+define i16 @test_atomicrmw_cond_sub_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_cond_sub_i16_global_agent(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[TMP5:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i16 [[TMP7]], i16 [[VALUE]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw cond_sub ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_cond_sub_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_cond_sub_i16_global_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[TMP3:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[TMP5]], i16 [[VALUE]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw cond_sub ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_cond_sub_i16_local(ptr addrspace(3) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_cond_sub_i16_local(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[TMP2]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(3) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[TMP5:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i16 [[TMP7]], i16 [[VALUE]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw cond_sub ptr addrspace(3) %ptr, i16 %value syncscope("agent") seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_cond_sub_i16_local_align4(ptr addrspace(3) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_cond_sub_i16_local_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(3) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[TMP3:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[TMP5]], i16 [[VALUE]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(3) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw cond_sub ptr addrspace(3) %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_cond_sub_i16_flat_agent(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_cond_sub_i16_flat_agent(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[TMP5:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i16 [[TMP7]], i16 [[VALUE]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw cond_sub ptr %ptr, i16 %value syncscope("agent") seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_cond_sub_i16_flat_agent_align4(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_cond_sub_i16_flat_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[TMP3:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[TMP5]], i16 [[VALUE]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw cond_sub ptr %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_sub_clamp_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_clamp_i16_global_agent(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i16 [[TMP5]], i16 0
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw sub_clamp ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_sub_clamp_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_clamp_i16_global_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i16 [[TMP3]], i16 0
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw sub_clamp ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_sub_clamp_i16_local(ptr addrspace(3) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_clamp_i16_local(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[TMP2]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(3) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i16 [[TMP5]], i16 0
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw sub_clamp ptr addrspace(3) %ptr, i16 %value seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_sub_clamp_i16_local_align4(ptr addrspace(3) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_clamp_i16_local_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(3) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i16 [[TMP3]], i16 0
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(3) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw sub_clamp ptr addrspace(3) %ptr, i16 %value seq_cst, align 4
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_sub_clamp_i16_flat_agent(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_clamp_i16_flat_agent(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i16 [[TMP5]], i16 0
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED3]]
+;
+ %res = atomicrmw sub_clamp ptr %ptr, i16 %value syncscope("agent") seq_cst
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_sub_clamp_i16_flat_agent_align4(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_clamp_i16_flat_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i16 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i16 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i16 [[TMP3]], i16 0
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -65536
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED1]]
+;
+ %res = atomicrmw sub_clamp ptr %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ ret i16 %res
+}
+
!0 = !{}
!1 = !{!"foo", !"bar"}
!2 = !{!3}
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
index 590ee63001615..f5158e43861bc 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
@@ -1712,3 +1712,801 @@ define i8 @test_atomicrmw_add_i8_buffer_fat_agent_align4(ptr addrspace(7) %ptr,
%res = atomicrmw add ptr addrspace(7) %ptr, i8 %value syncscope("agent") seq_cst, align 4
ret i8 %res
}
+
+define i8 @test_atomicrmw_cond_sub_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_cond_sub_i8_global_agent(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP7:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; GCN-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i8 [[TMP7]], i8 [[VALUE]]
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_cond_sub_i8_global_agent(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; R600-NEXT: [[TMP7:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; R600-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i8 [[TMP7]], i8 [[VALUE]]
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw cond_sub ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_cond_sub_i8_global_agent_align2(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_cond_sub_i8_global_agent_align2(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP7:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; GCN-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i8 [[TMP7]], i8 [[VALUE]]
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_cond_sub_i8_global_agent_align2(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; R600-NEXT: [[TMP7:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; R600-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i8 [[TMP7]], i8 [[VALUE]]
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw cond_sub ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_cond_sub_i8_global_agent_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_cond_sub_i8_global_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[TMP3:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[TMP5]], i8 [[VALUE]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw cond_sub ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 4
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_cond_sub_i8_local(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_cond_sub_i8_local(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(3) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i8 [[TMP7]], i8 [[VALUE]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw cond_sub ptr addrspace(3) %ptr, i8 %value seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_cond_sub_i8_local_align2(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_cond_sub_i8_local_align2(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(3) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i8 [[TMP7]], i8 [[VALUE]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw cond_sub ptr addrspace(3) %ptr, i8 %value seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_cond_sub_i8_local_align4(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_cond_sub_i8_local_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(3) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[TMP3:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[TMP5]], i8 [[VALUE]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(3) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw cond_sub ptr addrspace(3) %ptr, i8 %value seq_cst, align 4
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_cond_sub_i8_flat_agent(ptr %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_cond_sub_i8_flat_agent(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP7:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; GCN-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i8 [[TMP7]], i8 [[VALUE]]
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_cond_sub_i8_flat_agent(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; R600-NEXT: [[TMP7:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; R600-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i8 [[TMP7]], i8 [[VALUE]]
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw cond_sub ptr %ptr, i8 %value syncscope("agent") seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_cond_sub_i8_flat_agent_align2(ptr %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_cond_sub_i8_flat_agent_align2(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP7:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; GCN-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i8 [[TMP7]], i8 [[VALUE]]
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_cond_sub_i8_flat_agent_align2(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[TMP5:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; R600-NEXT: [[TMP7:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; R600-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i8 [[TMP7]], i8 [[VALUE]]
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw cond_sub ptr %ptr, i8 %value syncscope("agent") seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_cond_sub_i8_flat_agent_align4(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_cond_sub_i8_flat_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[TMP3:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[TMP5]], i8 [[VALUE]]
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw cond_sub ptr %ptr, i8 %value syncscope("agent") seq_cst, align 4
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_sub_clamp_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_sub_clamp_i8_global_agent(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; GCN-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 0
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_sub_clamp_i8_global_agent(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; R600-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; R600-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 0
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw sub_clamp ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_sub_clamp_i8_global_agent_align2(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_sub_clamp_i8_global_agent_align2(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; GCN-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 0
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_sub_clamp_i8_global_agent_align2(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; R600-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; R600-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 0
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw sub_clamp ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_sub_clamp_i8_global_agent_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_clamp_i8_global_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i8 [[TMP3]], i8 0
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw sub_clamp ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 4
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_sub_clamp_i8_local(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_clamp_i8_local(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(3) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 0
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw sub_clamp ptr addrspace(3) %ptr, i8 %value seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_sub_clamp_i8_local_align2(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_clamp_i8_local_align2(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(3) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 0
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(3) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw sub_clamp ptr addrspace(3) %ptr, i8 %value seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_sub_clamp_i8_local_align4(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_clamp_i8_local_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(3) [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i8 [[TMP3]], i8 0
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(3) [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw sub_clamp ptr addrspace(3) %ptr, i8 %value seq_cst, align 4
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_sub_clamp_i8_flat_agent(ptr %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_sub_clamp_i8_flat_agent(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; GCN-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 0
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_sub_clamp_i8_flat_agent(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; R600-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; R600-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 0
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw sub_clamp ptr %ptr, i8 %value syncscope("agent") seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_sub_clamp_i8_flat_agent_align2(ptr %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_sub_clamp_i8_flat_agent_align2(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; GCN-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 0
+; GCN-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; GCN-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; GCN-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; GCN-NEXT: [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED3]]
+;
+; R600-LABEL: @test_atomicrmw_sub_clamp_i8_flat_agent_align2(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: [[TMP4:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; R600-NEXT: [[TMP5:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; R600-NEXT: [[NEW:%.*]] = select i1 [[TMP4]], i8 [[TMP5]], i8 0
+; R600-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; R600-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
+; R600-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; R600-NEXT: [[TMP8:%.*]] = cmpxchg ptr [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED3]]
+;
+ %res = atomicrmw sub_clamp ptr %ptr, i8 %value syncscope("agent") seq_cst, align 2
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_sub_clamp_i8_flat_agent_align4(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_clamp_i8_flat_agent_align4(
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[LOADED]] to i8
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i8 [[EXTRACTED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i8 [[EXTRACTED]], [[VALUE]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i8 [[TMP3]], i8 0
+; CHECK-NEXT: [[EXTENDED:%.*]] = zext i8 [[NEW]] to i32
+; CHECK-NEXT: [[UNMASKED:%.*]] = and i32 [[LOADED]], -256
+; CHECK-NEXT: [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[EXTENDED]]
+; CHECK-NEXT: [[TMP6:%.*]] = cmpxchg ptr [[PTR]], i32 [[LOADED]], i32 [[INSERTED]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
+; CHECK-NEXT: ret i8 [[EXTRACTED1]]
+;
+ %res = atomicrmw sub_clamp ptr %ptr, i8 %value syncscope("agent") seq_cst, align 4
+ ret i8 %res
+}
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
index f8e85004d5f93..b8dcc4f0d2939 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
@@ -106,6 +106,10 @@ def AtomicBinOpUIncWrap : LLVM_EnumAttrCase<"uinc_wrap",
"uinc_wrap", "UIncWrap", 15>;
def AtomicBinOpUDecWrap : LLVM_EnumAttrCase<"udec_wrap",
"udec_wrap", "UDecWrap", 16>;
+def AtomicBinOpCondSub : LLVM_EnumAttrCase<"cond_sub",
+ "cond_sub", "CondSub", 17>;
+def AtomicBinOpSubClamp : LLVM_EnumAttrCase<"sub_clamp",
+ "sub_clamp", "SubClamp", 18>;
// A sentinel value that has no MLIR counterpart.
def AtomicBadBinOp : LLVM_EnumAttrCase<"", "", "BAD_BINOP", 0>;
@@ -118,7 +122,7 @@ def AtomicBinOp : LLVM_EnumAttr<
AtomicBinOpNand, AtomicBinOpOr, AtomicBinOpXor, AtomicBinOpMax,
AtomicBinOpMin, AtomicBinOpUMax, AtomicBinOpUMin, AtomicBinOpFAdd,
AtomicBinOpFSub, AtomicBinOpFMax, AtomicBinOpFMin, AtomicBinOpUIncWrap,
- AtomicBinOpUDecWrap],
+ AtomicBinOpUDecWrap, AtomicBinOpCondSub, AtomicBinOpSubClamp],
[AtomicBadBinOp]> {
let cppNamespace = "::mlir::LLVM";
}
diff --git a/mlir/test/Target/LLVMIR/Import/instructions.ll b/mlir/test/Target/LLVMIR/Import/instructions.ll
index 005aafb20a510..01a86aea20768 100644
--- a/mlir/test/Target/LLVMIR/Import/instructions.ll
+++ b/mlir/test/Target/LLVMIR/Import/instructions.ll
@@ -440,11 +440,15 @@ define void @atomic_rmw(ptr %ptr1, i32 %val1, ptr %ptr2, float %val2) {
%16 = atomicrmw uinc_wrap ptr %ptr1, i32 %val1 acquire
; CHECK: llvm.atomicrmw udec_wrap %[[PTR1]], %[[VAL1]] acquire
%17 = atomicrmw udec_wrap ptr %ptr1, i32 %val1 acquire
+ ; CHECK: llvm.atomicrmw cond_sub %[[PTR1]], %[[VAL1]] acquire
+ %18 = atomicrmw cond_sub ptr %ptr1, i32 %val1 acquire
+ ; CHECK: llvm.atomicrmw sub_clamp %[[PTR1]], %[[VAL1]] acquire
+ %19 = atomicrmw sub_clamp ptr %ptr1, i32 %val1 acquire
; CHECK: llvm.atomicrmw volatile
; CHECK-SAME: syncscope("singlethread")
; CHECK-SAME: {alignment = 8 : i64}
- %18 = atomicrmw volatile udec_wrap ptr %ptr1, i32 %val1 syncscope("singlethread") acquire, align 8
+ %20 = atomicrmw volatile udec_wrap ptr %ptr1, i32 %val1 syncscope("singlethread") acquire, align 8
ret void
}
diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
index 7116f3b64d7f5..3dbd4aab6ae47 100644
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -1501,11 +1501,15 @@ llvm.func @atomicrmw(
%15 = llvm.atomicrmw uinc_wrap %i32_ptr, %i32 monotonic : !llvm.ptr, i32
// CHECK: atomicrmw udec_wrap ptr %{{.*}}, i32 %{{.*}} monotonic
%16 = llvm.atomicrmw udec_wrap %i32_ptr, %i32 monotonic : !llvm.ptr, i32
+ // CHECK: atomicrmw cond_sub ptr %{{.*}}, i32 %{{.*}} monotonic
+ %17 = llvm.atomicrmw cond_sub %i32_ptr, %i32 monotonic : !llvm.ptr, i32
+ // CHECK: atomicrmw sub_clamp ptr %{{.*}}, i32 %{{.*}} monotonic
+ %18 = llvm.atomicrmw sub_clamp %i32_ptr, %i32 monotonic : !llvm.ptr, i32
// CHECK: atomicrmw volatile
// CHECK-SAME: syncscope("singlethread")
// CHECK-SAME: align 8
- %17 = llvm.atomicrmw volatile udec_wrap %i32_ptr, %i32 syncscope("singlethread") monotonic {alignment = 8 : i64} : !llvm.ptr, i32
+ %19 = llvm.atomicrmw volatile udec_wrap %i32_ptr, %i32 syncscope("singlethread") monotonic {alignment = 8 : i64} : !llvm.ptr, i32
llvm.return
}
>From 7815de717f2b8e67a839c7ff613f5ddd093e8cb9 Mon Sep 17 00:00:00 2001
From: Andrew Jenner <Andrew.Jenner at amd.com>
Date: Wed, 31 Jul 2024 11:28:27 -0400
Subject: [PATCH 2/2] Feedback from pull request review.
---
llvm/bindings/ocaml/llvm/llvm.ml | 4 +-
llvm/bindings/ocaml/llvm/llvm.mli | 4 +-
llvm/docs/GlobalISel/GenericOpcode.rst | 4 +-
llvm/docs/LangRef.rst | 8 +-
llvm/docs/ReleaseNotes.rst | 4 +-
llvm/include/llvm/AsmParser/LLToken.h | 4 +-
llvm/include/llvm/Bitcode/LLVMBitCodes.h | 4 +-
.../CodeGen/GlobalISel/MachineIRBuilder.h | 34 ++
llvm/include/llvm/CodeGen/ISDOpcodes.h | 4 +-
llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 8 +-
llvm/include/llvm/IR/Instructions.h | 6 +-
llvm/include/llvm/Support/TargetOpcodes.def | 6 +-
llvm/include/llvm/Target/GenericOpcodes.td | 4 +-
.../Target/GlobalISel/SelectionDAGCompat.td | 4 +-
.../include/llvm/Target/TargetSelectionDAG.td | 4 +-
llvm/lib/AsmParser/LLLexer.cpp | 4 +-
llvm/lib/AsmParser/LLParser.cpp | 8 +-
llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 8 +-
llvm/lib/Bitcode/Writer/BitcodeWriter.cpp | 8 +-
llvm/lib/CodeGen/AtomicExpandPass.cpp | 12 +-
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 8 +-
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 4 +-
.../SelectionDAG/SelectionDAGBuilder.cpp | 8 +-
.../SelectionDAG/SelectionDAGDumper.cpp | 8 +-
llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp | 4 +-
llvm/lib/IR/AutoUpgrade.cpp | 8 +-
llvm/lib/IR/Instructions.cpp | 8 +-
llvm/lib/Target/AMDGPU/AMDGPUGISel.td | 4 +-
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 4 +-
.../AMDGPU/AMDGPUInstructionSelector.cpp | 4 +-
llvm/lib/Target/AMDGPU/AMDGPUInstructions.td | 8 +-
.../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 2 +-
.../AMDGPU/AMDGPULowerBufferFatPointers.cpp | 4 +-
.../Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 4 +-
llvm/lib/Target/AMDGPU/DSInstructions.td | 8 +-
llvm/lib/Target/AMDGPU/FLATInstructions.td | 12 +-
llvm/lib/Target/AMDGPU/R600ISelLowering.cpp | 4 +-
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 4 +-
llvm/lib/Target/AMDGPU/SIInstrInfo.td | 4 +-
.../LoongArch/LoongArchISelLowering.cpp | 4 +-
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 4 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 4 +-
llvm/lib/Target/X86/X86ISelLowering.cpp | 4 +-
.../InstCombine/InstCombineAtomicRMW.cpp | 4 +-
llvm/lib/Transforms/Utils/LowerAtomic.cpp | 4 +-
.../AMDGPU/MIR/atomics-gmir.mir | 8 +-
llvm/test/Assembler/atomic.ll | 18 +-
llvm/test/Bitcode/amdgcn-atomic.ll | 48 +-
llvm/test/Bitcode/compatibility.ll | 34 +-
.../GlobalISel/legalizer-info-validation.mir | 4 +-
.../AArch64/atomicrmw-cond-sub-clamp.ll | 48 +-
.../AMDGPU/GlobalISel/atomicrmw_cond_sub.ll | 36 +-
.../AMDGPU/GlobalISel/atomicrmw_sub_clamp.ll | 60 +-
llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll | 72 +--
.../AMDGPU/cgp-addressing-modes-gfx1030.ll | 4 +-
.../AMDGPU/global-saddr-atomics.gfx1030.ll | 8 +-
.../AMDGPU/llvm.amdgcn.global.atomic.csub.ll | 8 +-
.../CodeGen/AMDGPU/private-memory-atomics.ll | 16 +-
llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll | 2 +-
.../CodeGen/ARM/atomicrmw-cond-sub-clamp.ll | 48 +-
.../Hexagon/atomicrmw-cond-sub-clamp.ll | 48 +-
.../LoongArch/atomicrmw-cond-sub-clamp.ll | 48 +-
.../PowerPC/atomicrmw-cond-sub-clamp.ll | 48 +-
.../CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll | 96 ++--
.../CodeGen/SPARC/atomicrmw-cond-sub-clamp.ll | 48 +-
.../VE/Scalar/atomicrmw-cond-sub-clamp.ll | 48 +-
.../WebAssembly/atomicrmw-cond-sub-clamp.ll | 96 ++--
.../CodeGen/X86/atomicrmw-cond-sub-clamp.ll | 516 +++++++++++++-----
.../AtomicExpand/AMDGPU/expand-atomic-i16.ll | 72 +--
.../AtomicExpand/AMDGPU/expand-atomic-i8.ll | 124 ++---
mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td | 10 +-
.../test/Target/LLVMIR/Import/instructions.ll | 8 +-
mlir/test/Target/LLVMIR/llvmir.mlir | 8 +-
73 files changed, 1073 insertions(+), 787 deletions(-)
diff --git a/llvm/bindings/ocaml/llvm/llvm.ml b/llvm/bindings/ocaml/llvm/llvm.ml
index ae42b1eea93d6..12531e21445b1 100644
--- a/llvm/bindings/ocaml/llvm/llvm.ml
+++ b/llvm/bindings/ocaml/llvm/llvm.ml
@@ -300,8 +300,8 @@ module AtomicRMWBinOp = struct
| FMin
| UInc_Wrap
| UDec_Wrap
- | Cond_Sub
- | Sub_Clamp
+ | USub_Cond
+ | USub_Sat
end
module ValueKind = struct
diff --git a/llvm/bindings/ocaml/llvm/llvm.mli b/llvm/bindings/ocaml/llvm/llvm.mli
index 9a6ed2ae80043..4b71be3406004 100644
--- a/llvm/bindings/ocaml/llvm/llvm.mli
+++ b/llvm/bindings/ocaml/llvm/llvm.mli
@@ -335,8 +335,8 @@ module AtomicRMWBinOp : sig
| FMin
| UInc_Wrap
| UDec_Wrap
- | Cond_Sub
- | Sub_Clamp
+ | USub_Cond
+ | USub_Sat
end
(** The kind of an [llvalue], the result of [classify_value v].
diff --git a/llvm/docs/GlobalISel/GenericOpcode.rst b/llvm/docs/GlobalISel/GenericOpcode.rst
index 67bd134174644..9105fc18d72cb 100644
--- a/llvm/docs/GlobalISel/GenericOpcode.rst
+++ b/llvm/docs/GlobalISel/GenericOpcode.rst
@@ -826,8 +826,8 @@ operands.
G_ATOMICRMW_UMIN, G_ATOMICRMW_FADD,
G_ATOMICRMW_FSUB, G_ATOMICRMW_FMAX,
G_ATOMICRMW_FMIN, G_ATOMICRMW_UINC_WRAP,
- G_ATOMICRMW_UDEC_WRAP, G_ATOMICRMW_COND_SUB,
- G_ATOMICRMW_SUB_CLAMP
+ G_ATOMICRMW_UDEC_WRAP, G_ATOMICRMW_USUB_COND,
+ G_ATOMICRMW_USUB_SAT
Generic atomicrmw. Expects a MachineMemOperand in addition to explicit
operands.
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index ed76bd454002a..3b83ec2484392 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -11209,8 +11209,8 @@ operation. The operation must be one of the following keywords:
- fmin
- uinc_wrap
- udec_wrap
-- cond_sub
-- sub_clamp
+- usub_cond
+- usub_sat
For most of these operations, the type of '<value>' must be an integer
type whose bit width is a power of two greater than or equal to eight
@@ -11261,8 +11261,8 @@ operation argument:
- fmin: ``*ptr = minnum(*ptr, val)`` (match the ``llvm.minnum.*`` intrinsic)
- uinc_wrap: ``*ptr = (*ptr u>= val) ? 0 : (*ptr + 1)`` (increment value with wraparound to zero when incremented above input value)
- udec_wrap: ``*ptr = ((*ptr == 0) || (*ptr u> val)) ? val : (*ptr - 1)`` (decrement with wraparound to input value when decremented below zero).
-- cond_sub: ``*ptr = (*ptr u>= val) ? *ptr - val : *ptr`` (subtract only if result would be positive).
-- sub_clamp: ``*ptr = (*ptr u>= val) ? *ptr - val : 0`` (subtract with clamping of negative results to zero).
+- usub_cond: ``*ptr = (*ptr u>= val) ? *ptr - val : *ptr`` (subtract only if no unsigned overflow).
+- usub_sat: ``*ptr = (*ptr u>= val) ? *ptr - val : 0`` (subtract with clamping to zero).
Example:
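
As a rough illustration of the new keywords (a minimal sketch, not taken from the
patch or from LangRef), assuming the value pointed to by %counter starts at 5:

  define i32 @usub_example(ptr %counter) {
    ; 5 u>= 3, so 2 is stored; the old value 5 is returned
    %old0 = atomicrmw usub_cond ptr %counter, i32 3 seq_cst
    ; 2 u< 10, so the stored value clamps to 0; the old value 2 is returned
    %old1 = atomicrmw usub_sat ptr %counter, i32 10 seq_cst
    ret i32 %old1
  }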
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index a6162127b3b74..f1c0ed4715611 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -80,7 +80,7 @@ Changes to the LLVM IR
removed. The next argument has been changed from byte index to bit
index.
-* Added ``cond_sub`` and ``sub_clamp`` operations to ``atomicrmw``.
+* Added ``usub_cond`` and ``usub_sat`` operations to ``atomicrmw``.
Changes to LLVM infrastructure
------------------------------
@@ -136,7 +136,7 @@ Changes to the AMDGPU Backend
* Removed ``llvm.amdgcn.atomic.cond.sub.u32`` and
``llvm.amdgcn.atomic.csub.u32`` intrinsics. :ref:`atomicrmw <i_atomicrmw>`
- should be used instead with ``cond_sub`` and ``sub_clamp``.
+ should be used instead with ``usub_cond`` and ``usub_sat``.
Changes to the ARM Backend
--------------------------
diff --git a/llvm/include/llvm/AsmParser/LLToken.h b/llvm/include/llvm/AsmParser/LLToken.h
index 8ee04f25095f2..19029842a572a 100644
--- a/llvm/include/llvm/AsmParser/LLToken.h
+++ b/llvm/include/llvm/AsmParser/LLToken.h
@@ -268,8 +268,8 @@ enum Kind {
kw_fmin,
kw_uinc_wrap,
kw_udec_wrap,
- kw_cond_sub,
- kw_sub_clamp,
+ kw_usub_cond,
+ kw_usub_sat,
// Instruction Opcodes (Opcode in UIntVal).
kw_fneg,
diff --git a/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index 20980695499e6..1c6fc442a4ef8 100644
--- a/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -485,8 +485,8 @@ enum RMWOperations {
RMW_FMIN = 14,
RMW_UINC_WRAP = 15,
RMW_UDEC_WRAP = 16,
- RMW_COND_SUB = 17,
- RMW_SUB_CLAMP = 18
+ RMW_USUB_COND = 17,
+ RMW_USUB_SAT = 18
};
/// OverflowingBinaryOperatorOptionalFlags - Flags for serializing
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 92e05ee858a75..bd64656f158b6 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -1601,6 +1601,40 @@ class MachineIRBuilder {
const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
MachineMemOperand &MMO);
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_USUB_COND Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the original value minus \p Val
+  /// if the original value is greater than or equal to \p Val, or leave it
+ /// unchanged otherwise. Puts the original value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWCondSub(
+ const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
+ MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_USUB_SAT Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the original value minus \p Val
+ /// if the original value is greater than or equal to \p Val, or with zero
+ /// otherwise. Puts the original value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWSubClamp(
+ const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
+ MachineMemOperand &MMO);
+
/// Build and insert `G_FENCE Ordering, Scope`.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope);
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 21ac93a3b4b9b..ee80d1e36af2a 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1292,8 +1292,8 @@ enum NodeType {
ATOMIC_LOAD_FMIN,
ATOMIC_LOAD_UINC_WRAP,
ATOMIC_LOAD_UDEC_WRAP,
- ATOMIC_LOAD_COND_SUB,
- ATOMIC_LOAD_SUB_CLAMP,
+ ATOMIC_LOAD_USUB_COND,
+ ATOMIC_LOAD_USUB_SAT,
// Masked load and store - consecutive vector load and store operations
// with additional mask operand that prevents memory accesses to the
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 1f3dd4ac1eda6..9838f89b73451 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1470,8 +1470,8 @@ class MemSDNode : public SDNode {
case ISD::ATOMIC_LOAD_FMIN:
case ISD::ATOMIC_LOAD_UINC_WRAP:
case ISD::ATOMIC_LOAD_UDEC_WRAP:
- case ISD::ATOMIC_LOAD_COND_SUB:
- case ISD::ATOMIC_LOAD_SUB_CLAMP:
+ case ISD::ATOMIC_LOAD_USUB_COND:
+ case ISD::ATOMIC_LOAD_USUB_SAT:
case ISD::ATOMIC_LOAD:
case ISD::ATOMIC_STORE:
case ISD::MLOAD:
@@ -1558,8 +1558,8 @@ class AtomicSDNode : public MemSDNode {
N->getOpcode() == ISD::ATOMIC_LOAD_FMIN ||
N->getOpcode() == ISD::ATOMIC_LOAD_UINC_WRAP ||
N->getOpcode() == ISD::ATOMIC_LOAD_UDEC_WRAP ||
- N->getOpcode() == ISD::ATOMIC_LOAD_COND_SUB ||
- N->getOpcode() == ISD::ATOMIC_LOAD_SUB_CLAMP ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_USUB_COND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_USUB_SAT ||
N->getOpcode() == ISD::ATOMIC_LOAD ||
N->getOpcode() == ISD::ATOMIC_STORE;
}
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index ab5c20758abde..fa8db60e5b91f 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -752,14 +752,14 @@ class AtomicRMWInst : public Instruction {
  /// Subtract only if no unsigned overflow.
/// *p = (old u>= v) ? old - v : old
- CondSub,
+ USubCond,
/// Subtract with clamping of negative results to zero.
/// *p = (old u>= v) ? old - v : 0
- SubClamp,
+ USubSat,
FIRST_BINOP = Xchg,
- LAST_BINOP = SubClamp,
+ LAST_BINOP = USubSat,
BAD_BINOP
};
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index 75bc350fa8d6b..4bb8627d5cd25 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -411,14 +411,14 @@ HANDLE_TARGET_OPCODE(G_ATOMICRMW_FMAX)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_FMIN)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_UINC_WRAP)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_UDEC_WRAP)
-HANDLE_TARGET_OPCODE(G_ATOMICRMW_COND_SUB)
-HANDLE_TARGET_OPCODE(G_ATOMICRMW_SUB_CLAMP)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_USUB_COND)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_USUB_SAT)
// Marker for start of Generic AtomicRMW opcodes
HANDLE_TARGET_OPCODE_MARKER(GENERIC_ATOMICRMW_OP_START, G_ATOMICRMW_XCHG)
// Marker for end of Generic AtomicRMW opcodes
-HANDLE_TARGET_OPCODE_MARKER(GENERIC_ATOMICRMW_OP_END, G_ATOMICRMW_SUB_CLAMP)
+HANDLE_TARGET_OPCODE_MARKER(GENERIC_ATOMICRMW_OP_END, G_ATOMICRMW_USUB_SAT)
// Generic atomic fence
HANDLE_TARGET_OPCODE(G_FENCE)
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 1691e83eae377..8401dfbb0208c 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -1291,8 +1291,8 @@ def G_ATOMICRMW_FMAX : G_ATOMICRMW_OP;
def G_ATOMICRMW_FMIN : G_ATOMICRMW_OP;
def G_ATOMICRMW_UINC_WRAP : G_ATOMICRMW_OP;
def G_ATOMICRMW_UDEC_WRAP : G_ATOMICRMW_OP;
-def G_ATOMICRMW_COND_SUB : G_ATOMICRMW_OP;
-def G_ATOMICRMW_SUB_CLAMP : G_ATOMICRMW_OP;
+def G_ATOMICRMW_USUB_COND : G_ATOMICRMW_OP;
+def G_ATOMICRMW_USUB_SAT : G_ATOMICRMW_OP;
def G_FENCE : GenericInstruction {
let OutOperandList = (outs);
diff --git a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index 43d4e8d37e9b0..49dd5a7928721 100644
--- a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -252,8 +252,8 @@ def : GINodeEquiv<G_ATOMICRMW_FMAX, atomic_load_fmax>;
def : GINodeEquiv<G_ATOMICRMW_FMIN, atomic_load_fmin>;
def : GINodeEquiv<G_ATOMICRMW_UINC_WRAP, atomic_load_uinc_wrap>;
def : GINodeEquiv<G_ATOMICRMW_UDEC_WRAP, atomic_load_udec_wrap>;
-def : GINodeEquiv<G_ATOMICRMW_COND_SUB, atomic_load_cond_sub>;
-def : GINodeEquiv<G_ATOMICRMW_SUB_CLAMP, atomic_load_sub_clamp>;
+def : GINodeEquiv<G_ATOMICRMW_USUB_COND, atomic_load_usub_cond>;
+def : GINodeEquiv<G_ATOMICRMW_USUB_SAT, atomic_load_usub_sat>;
def : GINodeEquiv<G_FENCE, atomic_fence>;
def : GINodeEquiv<G_PREFETCH, prefetch>;
def : GINodeEquiv<G_TRAP, trap>;
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index ac6cfd823eb44..93831f99bb5fe 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -722,9 +722,9 @@ def atomic_load_uinc_wrap : SDNode<"ISD::ATOMIC_LOAD_UINC_WRAP", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_udec_wrap : SDNode<"ISD::ATOMIC_LOAD_UDEC_WRAP", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_cond_sub : SDNode<"ISD::ATOMIC_LOAD_COND_SUB", SDTAtomic2,
+def atomic_load_usub_cond : SDNode<"ISD::ATOMIC_LOAD_USUB_COND", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_sub_clamp : SDNode<"ISD::ATOMIC_LOAD_SUB_CLAMP", SDTAtomic2,
+def atomic_load_usub_sat : SDNode<"ISD::ATOMIC_LOAD_USUB_SAT", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
diff --git a/llvm/lib/AsmParser/LLLexer.cpp b/llvm/lib/AsmParser/LLLexer.cpp
index cbd039bf98c44..8758b19106ced 100644
--- a/llvm/lib/AsmParser/LLLexer.cpp
+++ b/llvm/lib/AsmParser/LLLexer.cpp
@@ -704,8 +704,8 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(umin); KEYWORD(fmax); KEYWORD(fmin);
KEYWORD(uinc_wrap);
KEYWORD(udec_wrap);
- KEYWORD(cond_sub);
- KEYWORD(sub_clamp);
+ KEYWORD(usub_cond);
+ KEYWORD(usub_sat);
KEYWORD(splat);
KEYWORD(vscale);
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 2d086c859c14c..cdf8f247fdc80 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -8331,11 +8331,11 @@ int LLParser::parseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
case lltok::kw_udec_wrap:
Operation = AtomicRMWInst::UDecWrap;
break;
- case lltok::kw_cond_sub:
- Operation = AtomicRMWInst::CondSub;
+ case lltok::kw_usub_cond:
+ Operation = AtomicRMWInst::USubCond;
break;
- case lltok::kw_sub_clamp:
- Operation = AtomicRMWInst::SubClamp;
+ case lltok::kw_usub_sat:
+ Operation = AtomicRMWInst::USubSat;
break;
case lltok::kw_fadd:
Operation = AtomicRMWInst::FAdd;
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index eea10e9221b53..acb18fbf39554 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -1349,10 +1349,10 @@ static AtomicRMWInst::BinOp getDecodedRMWOperation(unsigned Val) {
return AtomicRMWInst::UIncWrap;
case bitc::RMW_UDEC_WRAP:
return AtomicRMWInst::UDecWrap;
- case bitc::RMW_COND_SUB:
- return AtomicRMWInst::CondSub;
- case bitc::RMW_SUB_CLAMP:
- return AtomicRMWInst::SubClamp;
+ case bitc::RMW_USUB_COND:
+ return AtomicRMWInst::USubCond;
+ case bitc::RMW_USUB_SAT:
+ return AtomicRMWInst::USubSat;
}
}
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 12002803ca54e..0d181b10bb8f3 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -658,10 +658,10 @@ static unsigned getEncodedRMWOperation(AtomicRMWInst::BinOp Op) {
return bitc::RMW_UINC_WRAP;
case AtomicRMWInst::UDecWrap:
return bitc::RMW_UDEC_WRAP;
- case AtomicRMWInst::CondSub:
- return bitc::RMW_COND_SUB;
- case AtomicRMWInst::SubClamp:
- return bitc::RMW_SUB_CLAMP;
+ case AtomicRMWInst::USubCond:
+ return bitc::RMW_USUB_COND;
+ case AtomicRMWInst::USubSat:
+ return bitc::RMW_USUB_SAT;
}
}
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index c54875e4bc0a2..f3a04e073a82f 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -869,8 +869,8 @@ static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
case AtomicRMWInst::FMax:
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
- case AtomicRMWInst::CondSub:
- case AtomicRMWInst::SubClamp: {
+ case AtomicRMWInst::USubCond:
+ case AtomicRMWInst::USubSat: {
// Finally, other ops will operate on the full value, so truncate down to
// the original size, and expand out again after doing the
// operation. Bitcasts will be inserted for FP values.
@@ -1544,8 +1544,8 @@ bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
case AtomicRMWInst::Sub:
case AtomicRMWInst::Or:
case AtomicRMWInst::Xor:
- case AtomicRMWInst::CondSub:
- case AtomicRMWInst::SubClamp:
+ case AtomicRMWInst::USubCond:
+ case AtomicRMWInst::USubSat:
return C->isZero();
case AtomicRMWInst::And:
return C->isMinusOne();
@@ -1787,8 +1787,8 @@ static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
case AtomicRMWInst::FSub:
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
- case AtomicRMWInst::CondSub:
- case AtomicRMWInst::SubClamp:
+ case AtomicRMWInst::USubCond:
+ case AtomicRMWInst::USubSat:
// No atomic libcalls are available for max/min/umax/umin.
return {};
}
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 452d8a1599636..6558c239d1b9b 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -3289,11 +3289,11 @@ bool IRTranslator::translateAtomicRMW(const User &U,
case AtomicRMWInst::UDecWrap:
Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
break;
- case AtomicRMWInst::CondSub:
- Opcode = TargetOpcode::G_ATOMICRMW_COND_SUB;
+ case AtomicRMWInst::USubCond:
+ Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;
break;
- case AtomicRMWInst::SubClamp:
- Opcode = TargetOpcode::G_ATOMICRMW_SUB_CLAMP;
+ case AtomicRMWInst::USubSat:
+ Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;
break;
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 41c5eca68b170..08c47a9e34cd6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -8580,8 +8580,8 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
Opcode == ISD::ATOMIC_LOAD_FMIN ||
Opcode == ISD::ATOMIC_LOAD_UINC_WRAP ||
Opcode == ISD::ATOMIC_LOAD_UDEC_WRAP ||
- Opcode == ISD::ATOMIC_LOAD_COND_SUB ||
- Opcode == ISD::ATOMIC_LOAD_SUB_CLAMP || Opcode == ISD::ATOMIC_SWAP ||
+ Opcode == ISD::ATOMIC_LOAD_USUB_COND ||
+ Opcode == ISD::ATOMIC_LOAD_USUB_SAT || Opcode == ISD::ATOMIC_SWAP ||
Opcode == ISD::ATOMIC_STORE) &&
"Invalid Atomic Op");
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 5bf9da7715c68..820a9934fe5fb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -5075,11 +5075,11 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
case AtomicRMWInst::UDecWrap:
NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
break;
- case AtomicRMWInst::CondSub:
- NT = ISD::ATOMIC_LOAD_COND_SUB;
+ case AtomicRMWInst::USubCond:
+ NT = ISD::ATOMIC_LOAD_USUB_COND;
break;
- case AtomicRMWInst::SubClamp:
- NT = ISD::ATOMIC_LOAD_SUB_CLAMP;
+ case AtomicRMWInst::USubSat:
+ NT = ISD::ATOMIC_LOAD_USUB_SAT;
break;
}
AtomicOrdering Ordering = I.getOrdering();
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 1218d272e75a1..816e0d6d0219b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -102,10 +102,10 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
return "AtomicLoadUIncWrap";
case ISD::ATOMIC_LOAD_UDEC_WRAP:
return "AtomicLoadUDecWrap";
- case ISD::ATOMIC_LOAD_COND_SUB:
- return "AtomicLoadCondSub";
- case ISD::ATOMIC_LOAD_SUB_CLAMP:
- return "AtomicLoadSubClamp";
+ case ISD::ATOMIC_LOAD_USUB_COND:
+ return "AtomicLoadUSubCond";
+ case ISD::ATOMIC_LOAD_USUB_SAT:
+ return "AtomicLoadUSubSat";
case ISD::ATOMIC_LOAD: return "AtomicLoad";
case ISD::ATOMIC_STORE: return "AtomicStore";
case ISD::PCMARKER: return "PCMarker";
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index e195df03ae7fa..c60a3baba9d80 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -5998,8 +5998,8 @@ Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2,
case AtomicRMWInst::FMin:
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
- case AtomicRMWInst::CondSub:
- case AtomicRMWInst::SubClamp:
+ case AtomicRMWInst::USubCond:
+ case AtomicRMWInst::USubSat:
llvm_unreachable("Unsupported atomic update operation");
}
llvm_unreachable("Unsupported atomic update operation");
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 2306d2d61efb6..914077640a7d6 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1026,8 +1026,8 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
if (Name.consume_front("atomic.")) {
if (Name.starts_with("inc") || Name.starts_with("dec") ||
Name.starts_with("cond.sub") || Name.starts_with("csub")) {
- // These were replaced with atomicrmw uinc_wrap, udec_wrap, cond_sub
- // and sub_clamp so there's no new declaration.
+ // These were replaced with atomicrmw uinc_wrap, udec_wrap, usub_cond
+ // and usub_sat so there's no new declaration.
NewFn = nullptr;
return true;
}
@@ -2350,8 +2350,8 @@ static Value *upgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI,
.StartsWith("ds.fadd", AtomicRMWInst::FAdd)
.StartsWith("atomic.inc.", AtomicRMWInst::UIncWrap)
.StartsWith("atomic.dec.", AtomicRMWInst::UDecWrap)
- .StartsWith("atomic.cond.sub", AtomicRMWInst::CondSub)
- .StartsWith("atomic.csub", AtomicRMWInst::SubClamp);
+ .StartsWith("atomic.cond.sub", AtomicRMWInst::USubCond)
+ .StartsWith("atomic.csub", AtomicRMWInst::USubSat);
unsigned NumOperands = CI->getNumOperands();
if (NumOperands < 3) // Malformed bitcode.
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 7917ee14f30ab..58e721f43dbd1 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1422,10 +1422,10 @@ StringRef AtomicRMWInst::getOperationName(BinOp Op) {
return "uinc_wrap";
case AtomicRMWInst::UDecWrap:
return "udec_wrap";
- case AtomicRMWInst::CondSub:
- return "cond_sub";
- case AtomicRMWInst::SubClamp:
- return "sub_clamp";
+ case AtomicRMWInst::USubCond:
+ return "usub_cond";
+ case AtomicRMWInst::USubSat:
+ return "usub_sat";
case AtomicRMWInst::BAD_BINOP:
return "<invalid operation>";
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index 3a197a9655122..a88d76f8d7130 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -271,8 +271,8 @@ def : GINodeEquiv<G_AMDGPU_TBUFFER_STORE_FORMAT_D16, SItbuffer_store_d16>;
// FIXME: Check MMO is atomic
def : GINodeEquiv<G_ATOMICRMW_UINC_WRAP, atomic_load_uinc_wrap_glue>;
def : GINodeEquiv<G_ATOMICRMW_UDEC_WRAP, atomic_load_udec_wrap_glue>;
-def : GINodeEquiv<G_ATOMICRMW_COND_SUB, atomic_load_cond_sub_glue>;
-def : GINodeEquiv<G_ATOMICRMW_SUB_CLAMP, atomic_load_sub_clamp_glue>;
+def : GINodeEquiv<G_ATOMICRMW_USUB_COND, atomic_load_usub_cond_glue>;
+def : GINodeEquiv<G_ATOMICRMW_USUB_SAT, atomic_load_usub_sat_glue>;
def : GINodeEquiv<G_ATOMICRMW_FMIN, atomic_load_fmin_glue>;
def : GINodeEquiv<G_ATOMICRMW_FMAX, atomic_load_fmax_glue>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 327b3d9054fa5..32f06b9dff796 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -6014,8 +6014,8 @@ AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
case AtomicRMWInst::FMax:
case AtomicRMWInst::FMin:
return AtomicExpansionKind::CmpXChg;
- case AtomicRMWInst::CondSub:
- case AtomicRMWInst::SubClamp:
+ case AtomicRMWInst::USubCond:
+ case AtomicRMWInst::USubSat:
if (auto *IntTy = dyn_cast<IntegerType>(RMW->getType())) {
unsigned Size = IntTy->getBitWidth();
if (Size == 32)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index ea10c0a5ba1fc..83822699757c5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -3619,8 +3619,8 @@ bool AMDGPUInstructionSelector::select(MachineInstr &I) {
case TargetOpcode::G_ATOMICRMW_UMAX:
case TargetOpcode::G_ATOMICRMW_UINC_WRAP:
case TargetOpcode::G_ATOMICRMW_UDEC_WRAP:
- case TargetOpcode::G_ATOMICRMW_COND_SUB:
- case TargetOpcode::G_ATOMICRMW_SUB_CLAMP:
+ case TargetOpcode::G_ATOMICRMW_USUB_COND:
+ case TargetOpcode::G_ATOMICRMW_USUB_SAT:
case TargetOpcode::G_ATOMICRMW_FADD:
case TargetOpcode::G_ATOMICRMW_FMIN:
case TargetOpcode::G_ATOMICRMW_FMAX:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index 22c23c89febd7..85c8e5bf00554 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -628,16 +628,12 @@ defm int_amdgcn_flat_atomic_fadd : global_addr_space_atomic_op;
defm int_amdgcn_global_atomic_fadd_v2bf16 : noret_op;
defm int_amdgcn_global_atomic_fmin : noret_op;
defm int_amdgcn_global_atomic_fmax : noret_op;
-//defm int_amdgcn_global_atomic_csub : noret_op;
defm int_amdgcn_flat_atomic_fadd : local_addr_space_atomic_op;
defm int_amdgcn_global_atomic_ordered_add_b64 : noret_op;
defm int_amdgcn_flat_atomic_fmin_num : noret_op;
defm int_amdgcn_flat_atomic_fmax_num : noret_op;
defm int_amdgcn_global_atomic_fmin_num : noret_op;
defm int_amdgcn_global_atomic_fmax_num : noret_op;
-//defm int_amdgcn_atomic_cond_sub_u32 : local_addr_space_atomic_op;
-//defm int_amdgcn_atomic_cond_sub_u32 : flat_addr_space_atomic_op;
-//defm int_amdgcn_atomic_cond_sub_u32 : global_addr_space_atomic_op;
multiclass noret_binary_atomic_op<SDNode atomic_op> {
let HasNoUse = true in
@@ -688,8 +684,8 @@ defm atomic_load_fmin : binary_atomic_op_fp_all_as<atomic_load_fmin>;
defm atomic_load_fmax : binary_atomic_op_fp_all_as<atomic_load_fmax>;
defm atomic_load_uinc_wrap : binary_atomic_op_all_as<atomic_load_uinc_wrap>;
defm atomic_load_udec_wrap : binary_atomic_op_all_as<atomic_load_udec_wrap>;
-defm atomic_load_cond_sub : binary_atomic_op_all_as<atomic_load_cond_sub>;
-defm atomic_load_sub_clamp : binary_atomic_op_all_as<atomic_load_sub_clamp>;
+defm atomic_load_usub_cond : binary_atomic_op_all_as<atomic_load_usub_cond>;
+defm atomic_load_usub_sat : binary_atomic_op_all_as<atomic_load_usub_sat>;
defm AMDGPUatomic_cmp_swap : binary_atomic_op_all_as<AMDGPUatomic_cmp_swap>;
def load_align8_local : PatFrag<(ops node:$ptr), (load_local node:$ptr)>,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index d6c07ad83d5b7..c0b7eebebba7c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1638,7 +1638,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
}
auto &Atomics32 =
- getActionDefinitionsBuilder({G_ATOMICRMW_COND_SUB, G_ATOMICRMW_SUB_CLAMP})
+ getActionDefinitionsBuilder({G_ATOMICRMW_USUB_COND, G_ATOMICRMW_USUB_SAT})
.legalFor({{S32, GlobalPtr}, {S32, LocalPtr}, {S32, RegionPtr}});
if (ST.hasFlatAddressSpace()) {
Atomics32.legalFor({{S32, FlatPtr}});
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
index 0ff32bad4cc06..60cc847704d28 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
@@ -1151,11 +1151,11 @@ Value *SplitPtrStructs::handleMemoryInst(Instruction *I, Value *Arg, Value *Ptr,
report_fatal_error("wrapping increment/decrement not supported for "
"buffer resources and should've been expanded away");
break;
- case AtomicRMWInst::CondSub:
+ case AtomicRMWInst::USubCond:
report_fatal_error("conditional subtract not supported for buffer "
"resources and should've been expanded away");
break;
- case AtomicRMWInst::SubClamp:
+ case AtomicRMWInst::USubSat:
report_fatal_error("subtract with clamp not supported for buffer "
"resources and should've been expanded away");
break;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index ce753f235ee60..589138ddd2ca6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -5220,8 +5220,8 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_ATOMICRMW_FMAX:
case AMDGPU::G_ATOMICRMW_UINC_WRAP:
case AMDGPU::G_ATOMICRMW_UDEC_WRAP:
- case AMDGPU::G_ATOMICRMW_COND_SUB:
- case AMDGPU::G_ATOMICRMW_SUB_CLAMP:
+ case AMDGPU::G_ATOMICRMW_USUB_COND:
+ case AMDGPU::G_ATOMICRMW_USUB_SAT:
case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG: {
OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
OpdsMapping[1] = getValueMappingForPtr(MRI, MI.getOperand(1).getReg());
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index f075834794ece..c2878e1f42c9a 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -1104,15 +1104,11 @@ defm : DSAtomicRetNoRetPat_mc<DS_PK_ADD_RTN_F16, DS_PK_ADD_F16, v2f16, "atomic_l
defm : DSAtomicRetNoRetPat_mc<DS_PK_ADD_RTN_BF16, DS_PK_ADD_BF16, v2bf16, "atomic_load_fadd">;
}
-defm : DSAtomicRetNoRetPat_mc<DS_COND_SUB_RTN_U32, DS_COND_SUB_U32, i32, "atomic_load_sub_clamp">;
-
let SubtargetPredicate = isGFX12Plus in {
-//defm : DSAtomicRetNoRetPat_mc<DS_COND_SUB_RTN_U32, DS_COND_SUB_U32, i32, "atomic_load_cond_sub">;
-
-defm : DSAtomicRetNoRetPatCondSub_mc<DS_COND_SUB_RTN_U32, DS_COND_SUB_U32, i32, "atomic_load_cond_sub">;
+defm : DSAtomicRetNoRetPatCondSub_mc<DS_COND_SUB_RTN_U32, DS_COND_SUB_U32, i32, "atomic_load_usub_cond">;
-defm : DSAtomicRetNoRetPat_mc<DS_SUB_CLAMP_RTN_U32, DS_SUB_CLAMP_U32, i32, "atomic_load_sub_clamp">;
+defm : DSAtomicRetNoRetPat_mc<DS_SUB_CLAMP_RTN_U32, DS_SUB_CLAMP_U32, i32, "atomic_load_usub_sat">;
} // let SubtargetPredicate = isGFX12Plus
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index c9ef2e9087c58..8dfa926bdf5bf 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -1429,10 +1429,10 @@ defm : FlatAtomicPat <"FLAT_ATOMIC_MAX_F64", "atomic_load_fmax_"#as, f64>;
}
let SubtargetPredicate = isGFX12Plus in {
- defm : FlatAtomicRtnPat<"FLAT_ATOMIC_COND_SUB_U32", "atomic_load_cond_sub_"#as, i32 >;
+ defm : FlatAtomicRtnPat<"FLAT_ATOMIC_COND_SUB_U32", "atomic_load_usub_cond_"#as, i32 >;
let OtherPredicates = [HasAtomicCSubNoRtnInsts] in
- defm : FlatAtomicNoRtnPat<"FLAT_ATOMIC_COND_SUB_U32", "atomic_load_cond_sub_"#as, i32>;
+ defm : FlatAtomicNoRtnPat<"FLAT_ATOMIC_COND_SUB_U32", "atomic_load_usub_cond_"#as, i32>;
}
} // end foreach as
@@ -1549,10 +1549,10 @@ defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_OR", "atomic_load_or_global", i32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_SWAP", "atomic_swap_global", i32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_CMPSWAP", "AMDGPUatomic_cmp_swap_global", i32, v2i32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_XOR", "atomic_load_xor_global", i32>;
-defm : GlobalFLATAtomicPatsRtn <"GLOBAL_ATOMIC_CSUB", "atomic_load_sub_clamp_global", i32>;
+defm : GlobalFLATAtomicPatsRtn <"GLOBAL_ATOMIC_CSUB", "atomic_load_usub_sat_global", i32>;
let OtherPredicates = [HasAtomicCSubNoRtnInsts] in
-defm : GlobalFLATAtomicPatsNoRtn <"GLOBAL_ATOMIC_CSUB", "atomic_load_sub_clamp_global", i32>;
+defm : GlobalFLATAtomicPatsNoRtn <"GLOBAL_ATOMIC_CSUB", "atomic_load_usub_sat_global", i32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_ADD_X2", "atomic_load_add_global", i64>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_SUB_X2", "atomic_load_sub_global", i64>;
@@ -1569,10 +1569,10 @@ defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_CMPSWAP_X2", "AMDGPUatomic_cmp_swap_
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_XOR_X2", "atomic_load_xor_global", i64>;
let SubtargetPredicate = isGFX12Plus in {
- defm : GlobalFLATAtomicPatsRtn <"GLOBAL_ATOMIC_COND_SUB_U32", "atomic_load_cond_sub_global", i32>;
+ defm : GlobalFLATAtomicPatsRtn <"GLOBAL_ATOMIC_COND_SUB_U32", "atomic_load_usub_cond_global", i32>;
let OtherPredicates = [HasAtomicCSubNoRtnInsts] in
- defm : GlobalFLATAtomicPatsNoRtn <"GLOBAL_ATOMIC_COND_SUB_U32", "atomic_load_cond_sub_global", i32>;
+ defm : GlobalFLATAtomicPatsNoRtn <"GLOBAL_ATOMIC_COND_SUB_U32", "atomic_load_usub_cond_global", i32>;
}
let OtherPredicates = [isGFX12Plus] in {
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index a317977a55b0f..aea30dd34cac0 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -2179,8 +2179,8 @@ R600TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
switch (RMW->getOperation()) {
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
- case AtomicRMWInst::CondSub:
- case AtomicRMWInst::SubClamp:
+ case AtomicRMWInst::USubCond:
+ case AtomicRMWInst::USubSat:
// FIXME: Cayman at least appears to have instructions for this, but the
  // instruction definitions appear to be missing.
return AtomicExpansionKind::CmpXChg;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 0fc242af6d4d9..065c9db602b83 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -949,8 +949,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
ISD::ATOMIC_LOAD_FMAX,
ISD::ATOMIC_LOAD_UINC_WRAP,
ISD::ATOMIC_LOAD_UDEC_WRAP,
- ISD::ATOMIC_LOAD_COND_SUB,
- ISD::ATOMIC_LOAD_SUB_CLAMP,
+ ISD::ATOMIC_LOAD_USUB_COND,
+ ISD::ATOMIC_LOAD_USUB_SAT,
ISD::INTRINSIC_VOID,
ISD::INTRINSIC_W_CHAIN});
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 1183a6329e7b0..08b515e170f19 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -718,8 +718,8 @@ defm atomic_load_add : SIAtomicM0Glue2 <"LOAD_ADD">;
defm atomic_load_sub : SIAtomicM0Glue2 <"LOAD_SUB">;
defm atomic_load_uinc_wrap : SIAtomicM0Glue2 <"LOAD_UINC_WRAP">;
defm atomic_load_udec_wrap : SIAtomicM0Glue2 <"LOAD_UDEC_WRAP">;
-defm atomic_load_cond_sub : SIAtomicM0Glue2 <"LOAD_COND_SUB">;
-defm atomic_load_sub_clamp : SIAtomicM0Glue2 <"LOAD_SUB_CLAMP">;
+defm atomic_load_usub_cond : SIAtomicM0Glue2 <"LOAD_USUB_COND">;
+defm atomic_load_usub_sat : SIAtomicM0Glue2 <"LOAD_USUB_SAT">;
defm atomic_load_and : SIAtomicM0Glue2 <"LOAD_AND">;
defm atomic_load_min : SIAtomicM0Glue2 <"LOAD_MIN">;
defm atomic_load_max : SIAtomicM0Glue2 <"LOAD_MAX">;
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index f9a751f72058d..b14630860827a 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -4540,8 +4540,8 @@ LoongArchTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
if (AI->isFloatingPointOperation() ||
AI->getOperation() == AtomicRMWInst::UIncWrap ||
AI->getOperation() == AtomicRMWInst::UDecWrap ||
- AI->getOperation() == AtomicRMWInst::CondSub ||
- AI->getOperation() == AtomicRMWInst::SubClamp)
+ AI->getOperation() == AtomicRMWInst::USubCond ||
+ AI->getOperation() == AtomicRMWInst::USubSat)
return AtomicExpansionKind::CmpXChg;
unsigned Size = AI->getType()->getPrimitiveSizeInBits();
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 267116a610506..30a3932ecaaa0 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -18774,8 +18774,8 @@ PPCTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
switch (AI->getOperation()) {
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
- case AtomicRMWInst::CondSub:
- case AtomicRMWInst::SubClamp:
+ case AtomicRMWInst::USubCond:
+ case AtomicRMWInst::USubSat:
return AtomicExpansionKind::CmpXChg;
default:
return TargetLowering::shouldExpandAtomicRMWInIR(AI);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 44cfd652b66c7..214d610bbebf9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20758,8 +20758,8 @@ RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
if (AI->isFloatingPointOperation() ||
AI->getOperation() == AtomicRMWInst::UIncWrap ||
AI->getOperation() == AtomicRMWInst::UDecWrap ||
- AI->getOperation() == AtomicRMWInst::CondSub ||
- AI->getOperation() == AtomicRMWInst::SubClamp)
+ AI->getOperation() == AtomicRMWInst::USubCond ||
+ AI->getOperation() == AtomicRMWInst::USubSat)
return AtomicExpansionKind::CmpXChg;
// Don't expand forced atomics, we want to have __sync libcalls instead.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 13deedfa7c035..fada0c04b734a 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -30912,8 +30912,8 @@ X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
case AtomicRMWInst::FMin:
case AtomicRMWInst::UIncWrap:
case AtomicRMWInst::UDecWrap:
- case AtomicRMWInst::CondSub:
- case AtomicRMWInst::SubClamp:
+ case AtomicRMWInst::USubCond:
+ case AtomicRMWInst::USubSat:
default:
// These always require a non-trivial set of data operations on x86. We must
// use a cmpxchg loop.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
index e9ac96f68d60c..80a337db56d21 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
@@ -41,8 +41,8 @@ bool isIdempotentRMW(AtomicRMWInst& RMWI) {
case AtomicRMWInst::Sub:
case AtomicRMWInst::Or:
case AtomicRMWInst::Xor:
- case AtomicRMWInst::CondSub:
- case AtomicRMWInst::SubClamp:
+ case AtomicRMWInst::USubCond:
+ case AtomicRMWInst::USubSat:
return C->isZero();
case AtomicRMWInst::And:
return C->isMinusOne();
diff --git a/llvm/lib/Transforms/Utils/LowerAtomic.cpp b/llvm/lib/Transforms/Utils/LowerAtomic.cpp
index f516555d58947..3d30e01030f6b 100644
--- a/llvm/lib/Transforms/Utils/LowerAtomic.cpp
+++ b/llvm/lib/Transforms/Utils/LowerAtomic.cpp
@@ -94,12 +94,12 @@ Value *llvm::buildAtomicRMWValue(AtomicRMWInst::BinOp Op,
Value *Or = Builder.CreateOr(CmpEq0, CmpOldGtVal);
return Builder.CreateSelect(Or, Val, Dec, "new");
}
- case AtomicRMWInst::CondSub: {
+ case AtomicRMWInst::USubCond: {
Value *Cmp = Builder.CreateICmpUGE(Loaded, Val);
Value *Sub = Builder.CreateSub(Loaded, Val);
return Builder.CreateSelect(Cmp, Sub, Val, "new");
}
- case AtomicRMWInst::SubClamp: {
+ case AtomicRMWInst::USubSat: {
Constant *Zero = ConstantInt::get(Loaded->getType(), 0);
Value *Cmp = Builder.CreateICmpUGE(Loaded, Val);
Value *Sub = Builder.CreateSub(Loaded, Val);
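
For an i32 operand, the sequence these builder calls emit is roughly the
following (value names are placeholders, not taken from the patch):

  %cmp = icmp uge i32 %loaded, %val
  %sub = sub i32 %loaded, %val
  ; usub_cond keeps the old value when the subtraction would underflow
  %new.cond = select i1 %cmp, i32 %sub, i32 %val
  ; usub_sat clamps the result to zero instead
  %new.sat = select i1 %cmp, i32 %sub, i32 0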
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/atomics-gmir.mir b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/atomics-gmir.mir
index f7dfd530b2285..86507f9d0e2ff 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/atomics-gmir.mir
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/atomics-gmir.mir
@@ -81,11 +81,11 @@ body: |
; CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_ATOMICRMW_UDEC_WRAP
%20:_(s32) = G_ATOMICRMW_UDEC_WRAP %1, %5
- ; CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_ATOMICRMW_COND_SUB
- %21:_(s32) = G_ATOMICRMW_COND_SUB %1, %5
+ ; CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_ATOMICRMW_USUB_COND
+ %21:_(s32) = G_ATOMICRMW_USUB_COND %1, %5
- ; CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_ATOMICRMW_SUB_CLAMP
- %22:_(s32) = G_ATOMICRMW_SUB_CLAMP %1, %5
+ ; CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_ATOMICRMW_USUB_SAT
+ %22:_(s32) = G_ATOMICRMW_USUB_SAT %1, %5
$vgpr0 = COPY %4(s32)
SI_RETURN implicit $vgpr0
diff --git a/llvm/test/Assembler/atomic.ll b/llvm/test/Assembler/atomic.ll
index f2f6c792fa265..a44dcccc16bef 100644
--- a/llvm/test/Assembler/atomic.ll
+++ b/llvm/test/Assembler/atomic.ll
@@ -42,15 +42,15 @@ define void @f(ptr %x) {
; CHECK: atomicrmw volatile udec_wrap ptr %x, i32 10 syncscope("agent") monotonic
atomicrmw volatile udec_wrap ptr %x, i32 10 syncscope("agent") monotonic
- ; CHECK: atomicrmw volatile cond_sub ptr %x, i32 10 monotonic
- atomicrmw volatile cond_sub ptr %x, i32 10 monotonic
- ; CHECK: atomicrmw volatile cond_sub ptr %x, i32 10 syncscope("agent") monotonic
- atomicrmw volatile cond_sub ptr %x, i32 10 syncscope("agent") monotonic
-
- ; CHECK: atomicrmw volatile sub_clamp ptr %x, i32 10 monotonic
- atomicrmw volatile sub_clamp ptr %x, i32 10 monotonic
- ; CHECK: atomicrmw volatile sub_clamp ptr %x, i32 10 syncscope("agent") monotonic
- atomicrmw volatile sub_clamp ptr %x, i32 10 syncscope("agent") monotonic
+ ; CHECK: atomicrmw volatile usub_cond ptr %x, i32 10 monotonic
+ atomicrmw volatile usub_cond ptr %x, i32 10 monotonic
+ ; CHECK: atomicrmw volatile usub_cond ptr %x, i32 10 syncscope("agent") monotonic
+ atomicrmw volatile usub_cond ptr %x, i32 10 syncscope("agent") monotonic
+
+ ; CHECK: atomicrmw volatile usub_sat ptr %x, i32 10 monotonic
+ atomicrmw volatile usub_sat ptr %x, i32 10 monotonic
+ ; CHECK: atomicrmw volatile usub_sat ptr %x, i32 10 syncscope("agent") monotonic
+ atomicrmw volatile usub_sat ptr %x, i32 10 syncscope("agent") monotonic
; CHECK: fence syncscope("singlethread") release
fence syncscope("singlethread") release
diff --git a/llvm/test/Bitcode/amdgcn-atomic.ll b/llvm/test/Bitcode/amdgcn-atomic.ll
index b49f3ac254a1e..643059f43e87f 100644
--- a/llvm/test/Bitcode/amdgcn-atomic.ll
+++ b/llvm/test/Bitcode/amdgcn-atomic.ll
@@ -250,75 +250,75 @@ define <2 x i16> @upgrade_amdgcn_ds_fadd_v2bf16__missing_args_as_i16(ptr addrspa
attributes #0 = { argmemonly nounwind willreturn }
-define void @atomic_cond_sub(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
- ; CHECK: atomicrmw cond_sub ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+define void @atomic_usub_cond(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
+ ; CHECK: atomicrmw usub_cond ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
%result0 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 0, i1 false)
- ; CHECK: atomicrmw cond_sub ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+ ; CHECK: atomicrmw usub_cond ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
%result1 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 0, i32 0, i1 false)
- ; CHECK: atomicrmw cond_sub ptr addrspace(3) %ptr3, i32 46 syncscope("agent") seq_cst, align 4
+ ; CHECK: atomicrmw usub_cond ptr addrspace(3) %ptr3, i32 46 syncscope("agent") seq_cst, align 4
%result2 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p3(ptr addrspace(3) %ptr3, i32 46, i32 0, i32 0, i1 false)
ret void
}
-define void @atomic_sub_clamp(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
- ; CHECK: atomicrmw sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+define void @atomic_usub_sat(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
+ ; CHECK: atomicrmw usub_sat ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
%result0 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 0, i1 false)
- ; CHECK: atomicrmw sub_clamp ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+ ; CHECK: atomicrmw usub_sat ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
%result1 = call i32 @llvm.amdgcn.atomic.csub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 0, i32 0, i1 false)
- ; CHECK: atomicrmw sub_clamp ptr addrspace(3) %ptr3, i32 46 syncscope("agent") seq_cst, align 4
+ ; CHECK: atomicrmw usub_sat ptr addrspace(3) %ptr3, i32 46 syncscope("agent") seq_cst, align 4
%result2 = call i32 @llvm.amdgcn.atomic.csub.i32.p3(ptr addrspace(3) %ptr3, i32 46, i32 0, i32 0, i1 false)
ret void
}
; Test some invalid ordering handling
-define void @ordering_cond_sub_clamp(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
- ; CHECK: atomicrmw volatile cond_sub ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+define void @ordering_usub_cond_usub_sat(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
+ ; CHECK: atomicrmw volatile usub_cond ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
%result0 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p0(ptr %ptr0, i32 42, i32 -1, i32 0, i1 true)
- ; CHECK: atomicrmw volatile cond_sub ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+ ; CHECK: atomicrmw volatile usub_cond ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
%result1 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 0, i32 0, i1 true)
- ; CHECK: atomicrmw cond_sub ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+ ; CHECK: atomicrmw usub_cond ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
%result2 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 1, i32 0, i1 false)
- ; CHECK: atomicrmw volatile cond_sub ptr addrspace(1) %ptr1, i32 43 syncscope("agent") monotonic, align 4
+ ; CHECK: atomicrmw volatile usub_cond ptr addrspace(1) %ptr1, i32 43 syncscope("agent") monotonic, align 4
%result3 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 2, i32 0, i1 true)
- ; CHECK: atomicrmw cond_sub ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+ ; CHECK: atomicrmw usub_cond ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
%result4 = call i32 @llvm.amdgcn.atomic.cond.sub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 3, i32 0, i1 false)
- ; CHECK: atomicrmw volatile sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ ; CHECK: atomicrmw volatile usub_sat ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
%result5 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 4, i1 true)
- ; CHECK: atomicrmw sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ ; CHECK: atomicrmw usub_sat ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
%result6 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 5, i1 false)
- ; CHECK: atomicrmw volatile sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ ; CHECK: atomicrmw volatile usub_sat ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
%result7 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 6, i1 true)
- ; CHECK: atomicrmw sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ ; CHECK: atomicrmw usub_sat ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
%result8 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 7, i1 false)
- ; CHECK:= atomicrmw volatile sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+ ; CHECK:= atomicrmw volatile usub_sat ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
%result9 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 0, i32 8, i1 true)
- ; CHECK:= atomicrmw volatile sub_clamp ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+ ; CHECK:= atomicrmw volatile usub_sat ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
%result10 = call i32 @llvm.amdgcn.atomic.csub.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 3, i32 0, i1 true)
ret void
}
-define void @immarg_violations_sub_clamp(ptr %ptr0, i32 %val32, i1 %val1) {
- ; CHECK: atomicrmw sub_clamp ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+define void @immarg_violations_usub_sat(ptr %ptr0, i32 %val32, i1 %val1) {
+ ; CHECK: atomicrmw usub_sat ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
%result0 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 %val32, i32 0, i1 false)
- ; CHECK: atomicrmw sub_clamp ptr %ptr0, i32 42 syncscope("agent") monotonic, align 4
+ ; CHECK: atomicrmw usub_sat ptr %ptr0, i32 42 syncscope("agent") monotonic, align 4
%result1 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 2, i32 %val32, i1 false)
- ; CHECK: atomicrmw volatile sub_clamp ptr %ptr0, i32 42 syncscope("agent") monotonic, align 4
+ ; CHECK: atomicrmw volatile usub_sat ptr %ptr0, i32 42 syncscope("agent") monotonic, align 4
%result2 = call i32 @llvm.amdgcn.atomic.csub.i32.p0(ptr %ptr0, i32 42, i32 2, i32 0, i1 %val1)
ret void
}
diff --git a/llvm/test/Bitcode/compatibility.ll b/llvm/test/Bitcode/compatibility.ll
index 1c424cdaa1016..a180da20cfb73 100644
--- a/llvm/test/Bitcode/compatibility.ll
+++ b/llvm/test/Bitcode/compatibility.ll
@@ -906,30 +906,30 @@ define void @uinc_udec_wrap_atomics(ptr %word) {
ret void
}
-define void @cond_sub_clamp_atomics(ptr %word) {
-; CHECK: %atomicrmw.condsub0 = atomicrmw cond_sub ptr %word, i32 64 monotonic
- %atomicrmw.condsub0 = atomicrmw cond_sub ptr %word, i32 64 monotonic
+define void @usub_cond_usub_sat_atomics(ptr %word) {
+; CHECK: %atomicrmw.condsub0 = atomicrmw usub_cond ptr %word, i32 64 monotonic
+ %atomicrmw.condsub0 = atomicrmw usub_cond ptr %word, i32 64 monotonic
-; CHECK: %atomicrmw.condsub1 = atomicrmw cond_sub ptr %word, i32 128 seq_cst
- %atomicrmw.condsub1 = atomicrmw cond_sub ptr %word, i32 128 seq_cst
+; CHECK: %atomicrmw.condsub1 = atomicrmw usub_cond ptr %word, i32 128 seq_cst
+ %atomicrmw.condsub1 = atomicrmw usub_cond ptr %word, i32 128 seq_cst
-; CHECK: %atomicrmw.condsub2 = atomicrmw volatile cond_sub ptr %word, i32 128 seq_cst
- %atomicrmw.condsub2 = atomicrmw volatile cond_sub ptr %word, i32 128 seq_cst
+; CHECK: %atomicrmw.condsub2 = atomicrmw volatile usub_cond ptr %word, i32 128 seq_cst
+ %atomicrmw.condsub2 = atomicrmw volatile usub_cond ptr %word, i32 128 seq_cst
-; CHECK: %atomicrmw.condsub0.syncscope = atomicrmw cond_sub ptr %word, i32 27 syncscope("agent") monotonic
- %atomicrmw.condsub0.syncscope = atomicrmw cond_sub ptr %word, i32 27 syncscope("agent") monotonic
+; CHECK: %atomicrmw.condsub0.syncscope = atomicrmw usub_cond ptr %word, i32 27 syncscope("agent") monotonic
+ %atomicrmw.condsub0.syncscope = atomicrmw usub_cond ptr %word, i32 27 syncscope("agent") monotonic
-; CHECK: %atomicrmw.subclamp0 = atomicrmw sub_clamp ptr %word, i32 99 monotonic
- %atomicrmw.subclamp0 = atomicrmw sub_clamp ptr %word, i32 99 monotonic
+; CHECK: %atomicrmw.subclamp0 = atomicrmw usub_sat ptr %word, i32 99 monotonic
+ %atomicrmw.subclamp0 = atomicrmw usub_sat ptr %word, i32 99 monotonic
-; CHECK: %atomicrmw.subclamp1 = atomicrmw sub_clamp ptr %word, i32 12 seq_cst
- %atomicrmw.subclamp1 = atomicrmw sub_clamp ptr %word, i32 12 seq_cst
+; CHECK: %atomicrmw.subclamp1 = atomicrmw usub_sat ptr %word, i32 12 seq_cst
+ %atomicrmw.subclamp1 = atomicrmw usub_sat ptr %word, i32 12 seq_cst
-; CHECK: %atomicrmw.subclamp2 = atomicrmw volatile sub_clamp ptr %word, i32 12 seq_cst
- %atomicrmw.subclamp2 = atomicrmw volatile sub_clamp ptr %word, i32 12 seq_cst
+; CHECK: %atomicrmw.subclamp2 = atomicrmw volatile usub_sat ptr %word, i32 12 seq_cst
+ %atomicrmw.subclamp2 = atomicrmw volatile usub_sat ptr %word, i32 12 seq_cst
-; CHECK: %atomicrmw.subclamp0.syncscope = atomicrmw sub_clamp ptr %word, i32 5 syncscope("system") monotonic
- %atomicrmw.subclamp0.syncscope = atomicrmw sub_clamp ptr %word, i32 5 syncscope("system") monotonic
+; CHECK: %atomicrmw.subclamp0.syncscope = atomicrmw usub_sat ptr %word, i32 5 syncscope("system") monotonic
+ %atomicrmw.subclamp0.syncscope = atomicrmw usub_sat ptr %word, i32 5 syncscope("system") monotonic
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index 6be8c3a180c33..d12b1e2f61f2d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -261,10 +261,10 @@
# DEBUG-NEXT: G_ATOMICRMW_UDEC_WRAP (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: G_ATOMICRMW_COND_SUB (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
+# DEBUG-NEXT: G_ATOMICRMW_USUB_COND (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: G_ATOMICRMW_SUB_CLAMP (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
+# DEBUG-NEXT: G_ATOMICRMW_USUB_SAT (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_FENCE (opcode {{[0-9]+}}): 0 type indices
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/AArch64/atomicrmw-cond-sub-clamp.ll
index b59505faccdeb..53c8d2e37d16b 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-cond-sub-clamp.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
-define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i8:
+define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: .LBB0_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
@@ -15,12 +15,12 @@ define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
- %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i16:
+define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: .LBB1_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
@@ -33,12 +33,12 @@ define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
- %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i32:
+define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: .LBB2_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
@@ -50,12 +50,12 @@ define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
- %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i64:
+define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: mov x8, x0
; CHECK-NEXT: .LBB3_1: // %atomicrmw.start
@@ -67,12 +67,12 @@ define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
; CHECK-NEXT: cbnz w10, .LBB3_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: ret
- %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i64 %val seq_cst
ret i64 %result
}
-define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i8:
+define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: .LBB4_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
@@ -85,12 +85,12 @@ define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
- %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i16:
+define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: .LBB5_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
@@ -103,12 +103,12 @@ define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
- %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i32:
+define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: .LBB6_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
@@ -120,12 +120,12 @@ define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
- %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i64:
+define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: mov x8, x0
; CHECK-NEXT: .LBB7_1: // %atomicrmw.start
@@ -137,6 +137,6 @@ define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
; CHECK-NEXT: cbnz w10, .LBB7_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: ret
- %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i64 %val seq_cst
ret i64 %result
}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_cond_sub.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_cond_sub.ll
index ceefb5da1e3cc..bf44ccea2a157 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_cond_sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_cond_sub.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX12
-define i32 @global_atomic_cond_sub(ptr addrspace(1) %ptr, i32 %data) {
-; GFX12-LABEL: global_atomic_cond_sub:
+define i32 @global_atomic_usub_cond(ptr addrspace(1) %ptr, i32 %data) {
+; GFX12-LABEL: global_atomic_usub_cond:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: s_wait_expcnt 0x0
@@ -14,12 +14,12 @@ define i32 @global_atomic_cond_sub(ptr addrspace(1) %ptr, i32 %data) {
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_setpc_b64 s[30:31]
- %ret = atomicrmw cond_sub ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
+ %ret = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
ret i32 %ret
}
-define i32 @global_atomic_cond_sub_offset(ptr addrspace(1) %ptr, i32 %data) {
-; GFX12-LABEL: global_atomic_cond_sub_offset:
+define i32 @global_atomic_usub_cond_offset(ptr addrspace(1) %ptr, i32 %data) {
+; GFX12-LABEL: global_atomic_usub_cond_offset:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: s_wait_expcnt 0x0
@@ -32,12 +32,12 @@ define i32 @global_atomic_cond_sub_offset(ptr addrspace(1) %ptr, i32 %data) {
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ %ret = atomicrmw usub_cond ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
ret i32 %ret
}
-define void @global_atomic_cond_sub_nortn(ptr addrspace(1) %ptr, i32 %data) {
-; GFX12-LABEL: global_atomic_cond_sub_nortn:
+define void @global_atomic_usub_cond_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX12-LABEL: global_atomic_usub_cond_nortn:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: s_wait_expcnt 0x0
@@ -49,12 +49,12 @@ define void @global_atomic_cond_sub_nortn(ptr addrspace(1) %ptr, i32 %data) {
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_setpc_b64 s[30:31]
- %ret = atomicrmw cond_sub ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
+ %ret = atomicrmw usub_cond ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
ret void
}
-define void @global_atomic_cond_sub_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
-; GFX12-LABEL: global_atomic_cond_sub_offset_nortn:
+define void @global_atomic_usub_cond_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX12-LABEL: global_atomic_usub_cond_offset_nortn:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: s_wait_expcnt 0x0
@@ -67,12 +67,12 @@ define void @global_atomic_cond_sub_offset_nortn(ptr addrspace(1) %ptr, i32 %dat
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ %ret = atomicrmw usub_cond ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
ret void
}
-define amdgpu_kernel void @global_atomic_cond_sub_sgpr_base_offset(ptr addrspace(1) %ptr, i32 %data) {
-; GFX12-LABEL: global_atomic_cond_sub_sgpr_base_offset:
+define amdgpu_kernel void @global_atomic_usub_cond_sgpr_base_offset(ptr addrspace(1) %ptr, i32 %data) {
+; GFX12-LABEL: global_atomic_usub_cond_sgpr_base_offset:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
@@ -85,13 +85,13 @@ define amdgpu_kernel void @global_atomic_cond_sub_sgpr_base_offset(ptr addrspace
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ %ret = atomicrmw usub_cond ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
store i32 %ret, ptr addrspace(1) undef
ret void
}
-define amdgpu_kernel void @global_atomic_cond_sub_sgpr_base_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
-; GFX12-LABEL: global_atomic_cond_sub_sgpr_base_offset_nortn:
+define amdgpu_kernel void @global_atomic_usub_cond_sgpr_base_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX12-LABEL: global_atomic_usub_cond_sgpr_base_offset_nortn:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
@@ -101,7 +101,7 @@ define amdgpu_kernel void @global_atomic_cond_sub_sgpr_base_offset_nortn(ptr add
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ %ret = atomicrmw usub_cond ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_sub_clamp.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_sub_clamp.ll
index 1cd6dcb193964..22575f0f8b89e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_sub_clamp.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_sub_clamp.ll
@@ -4,8 +4,8 @@
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX11
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX12
-define i32 @global_atomic_sub_clamp(ptr addrspace(1) %ptr, i32 %data) {
-; GFX10-LABEL: global_atomic_sub_clamp:
+define i32 @global_atomic_usub_sat(ptr addrspace(1) %ptr, i32 %data) {
+; GFX10-LABEL: global_atomic_usub_sat:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
@@ -15,7 +15,7 @@ define i32 @global_atomic_sub_clamp(ptr addrspace(1) %ptr, i32 %data) {
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_atomic_sub_clamp:
+; GFX11-LABEL: global_atomic_usub_sat:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
@@ -25,7 +25,7 @@ define i32 @global_atomic_sub_clamp(ptr addrspace(1) %ptr, i32 %data) {
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: global_atomic_sub_clamp:
+; GFX12-LABEL: global_atomic_usub_sat:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: s_wait_expcnt 0x0
@@ -37,12 +37,12 @@ define i32 @global_atomic_sub_clamp(ptr addrspace(1) %ptr, i32 %data) {
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_setpc_b64 s[30:31]
- %ret = atomicrmw sub_clamp ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
ret i32 %ret
}
-define i32 @global_atomic_sub_clamp_offset(ptr addrspace(1) %ptr, i32 %data) {
-; GFX10-LABEL: global_atomic_sub_clamp_offset:
+define i32 @global_atomic_usub_sat_offset(ptr addrspace(1) %ptr, i32 %data) {
+; GFX10-LABEL: global_atomic_usub_sat_offset:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
@@ -54,7 +54,7 @@ define i32 @global_atomic_sub_clamp_offset(ptr addrspace(1) %ptr, i32 %data) {
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_atomic_sub_clamp_offset:
+; GFX11-LABEL: global_atomic_usub_sat_offset:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
@@ -66,7 +66,7 @@ define i32 @global_atomic_sub_clamp_offset(ptr addrspace(1) %ptr, i32 %data) {
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: global_atomic_sub_clamp_offset:
+; GFX12-LABEL: global_atomic_usub_sat_offset:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: s_wait_expcnt 0x0
@@ -79,12 +79,12 @@ define i32 @global_atomic_sub_clamp_offset(ptr addrspace(1) %ptr, i32 %data) {
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = atomicrmw sub_clamp ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
ret i32 %ret
}
-define void @global_atomic_sub_clamp_nortn(ptr addrspace(1) %ptr, i32 %data) {
-; GFX10-LABEL: global_atomic_sub_clamp_nortn:
+define void @global_atomic_usub_sat_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX10-LABEL: global_atomic_usub_sat_nortn:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
@@ -94,7 +94,7 @@ define void @global_atomic_sub_clamp_nortn(ptr addrspace(1) %ptr, i32 %data) {
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_atomic_sub_clamp_nortn:
+; GFX11-LABEL: global_atomic_usub_sat_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
@@ -104,7 +104,7 @@ define void @global_atomic_sub_clamp_nortn(ptr addrspace(1) %ptr, i32 %data) {
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: global_atomic_sub_clamp_nortn:
+; GFX12-LABEL: global_atomic_usub_sat_nortn:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: s_wait_expcnt 0x0
@@ -116,12 +116,12 @@ define void @global_atomic_sub_clamp_nortn(ptr addrspace(1) %ptr, i32 %data) {
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_setpc_b64 s[30:31]
- %ret = atomicrmw sub_clamp ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, align 4
ret void
}
-define void @global_atomic_sub_clamp_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
-; GFX10-LABEL: global_atomic_sub_clamp_offset_nortn:
+define void @global_atomic_usub_sat_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX10-LABEL: global_atomic_usub_sat_offset_nortn:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
@@ -133,7 +133,7 @@ define void @global_atomic_sub_clamp_offset_nortn(ptr addrspace(1) %ptr, i32 %da
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_atomic_sub_clamp_offset_nortn:
+; GFX11-LABEL: global_atomic_usub_sat_offset_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v0
@@ -145,7 +145,7 @@ define void @global_atomic_sub_clamp_offset_nortn(ptr addrspace(1) %ptr, i32 %da
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: global_atomic_sub_clamp_offset_nortn:
+; GFX12-LABEL: global_atomic_usub_sat_offset_nortn:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: s_wait_expcnt 0x0
@@ -158,12 +158,12 @@ define void @global_atomic_sub_clamp_offset_nortn(ptr addrspace(1) %ptr, i32 %da
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = atomicrmw sub_clamp ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
ret void
}
-define amdgpu_kernel void @global_atomic_sub_clamp_sgpr_base_offset(ptr addrspace(1) %ptr, i32 %data) {
-; GFX10-LABEL: global_atomic_sub_clamp_sgpr_base_offset:
+define amdgpu_kernel void @global_atomic_usub_sat_sgpr_base_offset(ptr addrspace(1) %ptr, i32 %data) {
+; GFX10-LABEL: global_atomic_usub_sat_sgpr_base_offset:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dword s2, s[4:5], 0x8
@@ -178,7 +178,7 @@ define amdgpu_kernel void @global_atomic_sub_clamp_sgpr_base_offset(ptr addrspac
; GFX10-NEXT: global_store_dword v[0:1], v0, off
; GFX10-NEXT: s_endpgm
;
-; GFX11-LABEL: global_atomic_sub_clamp_sgpr_base_offset:
+; GFX11-LABEL: global_atomic_usub_sat_sgpr_base_offset:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x8
@@ -194,7 +194,7 @@ define amdgpu_kernel void @global_atomic_sub_clamp_sgpr_base_offset(ptr addrspac
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
-; GFX12-LABEL: global_atomic_sub_clamp_sgpr_base_offset:
+; GFX12-LABEL: global_atomic_usub_sat_sgpr_base_offset:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
@@ -207,13 +207,13 @@ define amdgpu_kernel void @global_atomic_sub_clamp_sgpr_base_offset(ptr addrspac
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = atomicrmw sub_clamp ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
store i32 %ret, ptr addrspace(1) undef
ret void
}
-define amdgpu_kernel void @global_atomic_sub_clamp_sgpr_base_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
-; GFX10-LABEL: global_atomic_sub_clamp_sgpr_base_offset_nortn:
+define amdgpu_kernel void @global_atomic_usub_sat_sgpr_base_offset_nortn(ptr addrspace(1) %ptr, i32 %data) {
+; GFX10-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dword s2, s[4:5], 0x8
@@ -227,7 +227,7 @@ define amdgpu_kernel void @global_atomic_sub_clamp_sgpr_base_offset_nortn(ptr ad
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
-; GFX11-LABEL: global_atomic_sub_clamp_sgpr_base_offset_nortn:
+; GFX11-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x8
@@ -240,7 +240,7 @@ define amdgpu_kernel void @global_atomic_sub_clamp_sgpr_base_offset_nortn(ptr ad
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
;
-; GFX12-LABEL: global_atomic_sub_clamp_sgpr_base_offset_nortn:
+; GFX12-LABEL: global_atomic_usub_sat_sgpr_base_offset_nortn:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
@@ -250,7 +250,7 @@ define amdgpu_kernel void @global_atomic_sub_clamp_sgpr_base_offset_nortn(ptr ad
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 1024
- %ret = atomicrmw sub_clamp ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
+ %ret = atomicrmw usub_sat ptr addrspace(1) %gep, i32 %data syncscope("agent") seq_cst, align 4
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll b/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll
index 7685e0a58e7ce..470338cba89a5 100644
--- a/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll
@@ -2,8 +2,8 @@
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12-SDAG %s
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12-GISEL %s
-define amdgpu_kernel void @flat_atomic_cond_sub_no_rtn_u32(ptr %addr, i32 %in) {
-; GFX12-SDAG-LABEL: flat_atomic_cond_sub_no_rtn_u32:
+define amdgpu_kernel void @flat_atomic_usub_cond_no_rtn_u32(ptr %addr, i32 %in) {
+; GFX12-SDAG-LABEL: flat_atomic_usub_cond_no_rtn_u32:
; GFX12-SDAG: ; %bb.0: ; %entry
; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[0:1], 0x24
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
@@ -14,7 +14,7 @@ define amdgpu_kernel void @flat_atomic_cond_sub_no_rtn_u32(ptr %addr, i32 %in) {
; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
; GFX12-SDAG-NEXT: s_endpgm
;
-; GFX12-GISEL-LABEL: flat_atomic_cond_sub_no_rtn_u32:
+; GFX12-GISEL-LABEL: flat_atomic_usub_cond_no_rtn_u32:
; GFX12-GISEL: ; %bb.0: ; %entry
; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[0:1], 0x24
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
@@ -26,12 +26,12 @@ define amdgpu_kernel void @flat_atomic_cond_sub_no_rtn_u32(ptr %addr, i32 %in) {
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %addr, i32 -4
- %unused = atomicrmw cond_sub ptr %gep, i32 %in seq_cst
+ %unused = atomicrmw usub_cond ptr %gep, i32 %in seq_cst
ret void
}
-define amdgpu_kernel void @flat_atomic_cond_sub_no_rtn_u32_forced(ptr %addr, i32 %in) "target-features"="+atomic-csub-no-rtn-insts" {
-; GFX12-SDAG-LABEL: flat_atomic_cond_sub_no_rtn_u32_forced:
+define amdgpu_kernel void @flat_atomic_usub_cond_no_rtn_u32_forced(ptr %addr, i32 %in) "target-features"="+atomic-csub-no-rtn-insts" {
+; GFX12-SDAG-LABEL: flat_atomic_usub_cond_no_rtn_u32_forced:
; GFX12-SDAG: ; %bb.0: ; %entry
; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[0:1], 0x24
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
@@ -42,7 +42,7 @@ define amdgpu_kernel void @flat_atomic_cond_sub_no_rtn_u32_forced(ptr %addr, i32
; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
; GFX12-SDAG-NEXT: s_endpgm
;
-; GFX12-GISEL-LABEL: flat_atomic_cond_sub_no_rtn_u32_forced:
+; GFX12-GISEL-LABEL: flat_atomic_usub_cond_no_rtn_u32_forced:
; GFX12-GISEL: ; %bb.0: ; %entry
; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[0:1], 0x24
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
@@ -54,12 +54,12 @@ define amdgpu_kernel void @flat_atomic_cond_sub_no_rtn_u32_forced(ptr %addr, i32
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %addr, i32 -4
- %unused = atomicrmw cond_sub ptr %gep, i32 %in seq_cst
+ %unused = atomicrmw usub_cond ptr %gep, i32 %in seq_cst
ret void
}
-define amdgpu_kernel void @flat_atomic_cond_sub_rtn_u32(ptr %addr, i32 %in, ptr %use) {
-; GFX12-SDAG-LABEL: flat_atomic_cond_sub_rtn_u32:
+define amdgpu_kernel void @flat_atomic_usub_cond_rtn_u32(ptr %addr, i32 %in, ptr %use) {
+; GFX12-SDAG-LABEL: flat_atomic_usub_cond_rtn_u32:
; GFX12-SDAG: ; %bb.0: ; %entry
; GFX12-SDAG-NEXT: s_clause 0x1
; GFX12-SDAG-NEXT: s_load_b96 s[4:6], s[0:1], 0x24
@@ -74,7 +74,7 @@ define amdgpu_kernel void @flat_atomic_cond_sub_rtn_u32(ptr %addr, i32 %in, ptr
; GFX12-SDAG-NEXT: flat_store_b32 v[0:1], v2
; GFX12-SDAG-NEXT: s_endpgm
;
-; GFX12-GISEL-LABEL: flat_atomic_cond_sub_rtn_u32:
+; GFX12-GISEL-LABEL: flat_atomic_usub_cond_rtn_u32:
; GFX12-GISEL: ; %bb.0: ; %entry
; GFX12-GISEL-NEXT: s_clause 0x1
; GFX12-GISEL-NEXT: s_load_b96 s[4:6], s[0:1], 0x24
@@ -90,13 +90,13 @@ define amdgpu_kernel void @flat_atomic_cond_sub_rtn_u32(ptr %addr, i32 %in, ptr
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %addr, i32 4
- %val = atomicrmw cond_sub ptr %gep, i32 %in seq_cst
+ %val = atomicrmw usub_cond ptr %gep, i32 %in seq_cst
store i32 %val, ptr %use
ret void
}
-define amdgpu_kernel void @global_atomic_cond_sub_no_rtn_u32(ptr addrspace(1) %addr, i32 %in) {
-; GFX12-SDAG-LABEL: global_atomic_cond_sub_no_rtn_u32:
+define amdgpu_kernel void @global_atomic_usub_cond_no_rtn_u32(ptr addrspace(1) %addr, i32 %in) {
+; GFX12-SDAG-LABEL: global_atomic_usub_cond_no_rtn_u32:
; GFX12-SDAG: ; %bb.0: ; %entry
; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[0:1], 0x24
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
@@ -106,7 +106,7 @@ define amdgpu_kernel void @global_atomic_cond_sub_no_rtn_u32(ptr addrspace(1) %a
; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
; GFX12-SDAG-NEXT: s_endpgm
;
-; GFX12-GISEL-LABEL: global_atomic_cond_sub_no_rtn_u32:
+; GFX12-GISEL-LABEL: global_atomic_usub_cond_no_rtn_u32:
; GFX12-GISEL: ; %bb.0: ; %entry
; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[0:1], 0x24
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
@@ -117,12 +117,12 @@ define amdgpu_kernel void @global_atomic_cond_sub_no_rtn_u32(ptr addrspace(1) %a
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %addr, i32 -4
- %unused = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %in seq_cst
+ %unused = atomicrmw usub_cond ptr addrspace(1) %gep, i32 %in seq_cst
ret void
}
-define amdgpu_kernel void @global_atomic_cond_sub_no_rtn_u32_forced(ptr addrspace(1) %addr, i32 %in) "target-features"="+atomic-csub-no-rtn-insts" {
-; GFX12-SDAG-LABEL: global_atomic_cond_sub_no_rtn_u32_forced:
+define amdgpu_kernel void @global_atomic_usub_cond_no_rtn_u32_forced(ptr addrspace(1) %addr, i32 %in) "target-features"="+atomic-csub-no-rtn-insts" {
+; GFX12-SDAG-LABEL: global_atomic_usub_cond_no_rtn_u32_forced:
; GFX12-SDAG: ; %bb.0: ; %entry
; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[0:1], 0x24
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
@@ -132,7 +132,7 @@ define amdgpu_kernel void @global_atomic_cond_sub_no_rtn_u32_forced(ptr addrspac
; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SYS
; GFX12-SDAG-NEXT: s_endpgm
;
-; GFX12-GISEL-LABEL: global_atomic_cond_sub_no_rtn_u32_forced:
+; GFX12-GISEL-LABEL: global_atomic_usub_cond_no_rtn_u32_forced:
; GFX12-GISEL: ; %bb.0: ; %entry
; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[0:1], 0x24
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
@@ -143,12 +143,12 @@ define amdgpu_kernel void @global_atomic_cond_sub_no_rtn_u32_forced(ptr addrspac
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %addr, i32 -4
- %unused = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %in seq_cst
+ %unused = atomicrmw usub_cond ptr addrspace(1) %gep, i32 %in seq_cst
ret void
}
-define amdgpu_kernel void @global_atomic_cond_sub_rtn_u32(ptr addrspace(1) %addr, i32 %in, ptr addrspace(1) %use) {
-; GFX12-SDAG-LABEL: global_atomic_cond_sub_rtn_u32:
+define amdgpu_kernel void @global_atomic_usub_cond_rtn_u32(ptr addrspace(1) %addr, i32 %in, ptr addrspace(1) %use) {
+; GFX12-SDAG-LABEL: global_atomic_usub_cond_rtn_u32:
; GFX12-SDAG: ; %bb.0: ; %entry
; GFX12-SDAG-NEXT: s_load_b96 s[4:6], s[0:1], 0x24
; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 0
@@ -163,7 +163,7 @@ define amdgpu_kernel void @global_atomic_cond_sub_rtn_u32(ptr addrspace(1) %addr
; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-SDAG-NEXT: s_endpgm
;
-; GFX12-GISEL-LABEL: global_atomic_cond_sub_rtn_u32:
+; GFX12-GISEL-LABEL: global_atomic_usub_cond_rtn_u32:
; GFX12-GISEL: ; %bb.0: ; %entry
; GFX12-GISEL-NEXT: s_clause 0x1
; GFX12-GISEL-NEXT: s_load_b96 s[4:6], s[0:1], 0x24
@@ -179,13 +179,13 @@ define amdgpu_kernel void @global_atomic_cond_sub_rtn_u32(ptr addrspace(1) %addr
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %addr, i32 4
- %val = atomicrmw cond_sub ptr addrspace(1) %gep, i32 %in seq_cst
+ %val = atomicrmw usub_cond ptr addrspace(1) %gep, i32 %in seq_cst
store i32 %val, ptr addrspace(1) %use
ret void
}
-define amdgpu_kernel void @ds_cond_sub_no_rtn_u32(ptr addrspace(3) %addr, i32 %in) {
-; GFX12-SDAG-LABEL: ds_cond_sub_no_rtn_u32:
+define amdgpu_kernel void @ds_usub_cond_no_rtn_u32(ptr addrspace(3) %addr, i32 %in) {
+; GFX12-SDAG-LABEL: ds_usub_cond_no_rtn_u32:
; GFX12-SDAG: ; %bb.0: ; %entry
; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
@@ -197,7 +197,7 @@ define amdgpu_kernel void @ds_cond_sub_no_rtn_u32(ptr addrspace(3) %addr, i32 %i
; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SE
; GFX12-SDAG-NEXT: s_endpgm
;
-; GFX12-GISEL-LABEL: ds_cond_sub_no_rtn_u32:
+; GFX12-GISEL-LABEL: ds_usub_cond_no_rtn_u32:
; GFX12-GISEL: ; %bb.0: ; %entry
; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
@@ -210,12 +210,12 @@ define amdgpu_kernel void @ds_cond_sub_no_rtn_u32(ptr addrspace(3) %addr, i32 %i
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(3) %addr, i32 -4
- %unused = atomicrmw cond_sub ptr addrspace(3) %gep, i32 %in seq_cst
+ %unused = atomicrmw usub_cond ptr addrspace(3) %gep, i32 %in seq_cst
ret void
}
-define amdgpu_kernel void @ds_cond_sub_no_rtn_u32_forced(ptr addrspace(3) %addr, i32 %in) "target-features"="+atomic-csub-no-rtn-insts" {
-; GFX12-SDAG-LABEL: ds_cond_sub_no_rtn_u32_forced:
+define amdgpu_kernel void @ds_usub_cond_no_rtn_u32_forced(ptr addrspace(3) %addr, i32 %in) "target-features"="+atomic-csub-no-rtn-insts" {
+; GFX12-SDAG-LABEL: ds_usub_cond_no_rtn_u32_forced:
; GFX12-SDAG: ; %bb.0: ; %entry
; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
@@ -227,7 +227,7 @@ define amdgpu_kernel void @ds_cond_sub_no_rtn_u32_forced(ptr addrspace(3) %addr,
; GFX12-SDAG-NEXT: global_inv scope:SCOPE_SE
; GFX12-SDAG-NEXT: s_endpgm
;
-; GFX12-GISEL-LABEL: ds_cond_sub_no_rtn_u32_forced:
+; GFX12-GISEL-LABEL: ds_usub_cond_no_rtn_u32_forced:
; GFX12-GISEL: ; %bb.0: ; %entry
; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
@@ -240,12 +240,12 @@ define amdgpu_kernel void @ds_cond_sub_no_rtn_u32_forced(ptr addrspace(3) %addr,
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(3) %addr, i32 -4
- %unused = atomicrmw cond_sub ptr addrspace(3) %gep, i32 %in seq_cst
+ %unused = atomicrmw usub_cond ptr addrspace(3) %gep, i32 %in seq_cst
ret void
}
-define amdgpu_kernel void @ds_cond_sub_rtn_u32(ptr addrspace(3) %addr, i32 %in, ptr addrspace(3) %use) {
-; GFX12-SDAG-LABEL: ds_cond_sub_rtn_u32:
+define amdgpu_kernel void @ds_usub_cond_rtn_u32(ptr addrspace(3) %addr, i32 %in, ptr addrspace(3) %use) {
+; GFX12-SDAG-LABEL: ds_usub_cond_rtn_u32:
; GFX12-SDAG: ; %bb.0: ; %entry
; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[0:1], 0x24
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
@@ -257,7 +257,7 @@ define amdgpu_kernel void @ds_cond_sub_rtn_u32(ptr addrspace(3) %addr, i32 %in,
; GFX12-SDAG-NEXT: ds_store_b32 v1, v0
; GFX12-SDAG-NEXT: s_endpgm
;
-; GFX12-GISEL-LABEL: ds_cond_sub_rtn_u32:
+; GFX12-GISEL-LABEL: ds_usub_cond_rtn_u32:
; GFX12-GISEL: ; %bb.0: ; %entry
; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[0:1], 0x24
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
@@ -270,7 +270,7 @@ define amdgpu_kernel void @ds_cond_sub_rtn_u32(ptr addrspace(3) %addr, i32 %in,
; GFX12-GISEL-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(3) %addr, i32 4
- %val = atomicrmw cond_sub ptr addrspace(3) %gep, i32 %in seq_cst
+ %val = atomicrmw usub_cond ptr addrspace(3) %gep, i32 %in seq_cst
store i32 %val, ptr addrspace(3) %use
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx1030.ll b/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx1030.ll
index c679e3b57bc2a..8a104d7539a8e 100644
--- a/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx1030.ll
+++ b/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx1030.ll
@@ -13,7 +13,7 @@ define amdgpu_kernel void @test_sink_small_offset_global_atomic_csub_i32(ptr add
; OPT-NEXT: br i1 [[CMP]], label [[ENDIF:%.*]], label [[IF:%.*]]
; OPT: if:
; OPT-NEXT: [[IN_GEP:%.*]] = getelementptr i32, ptr addrspace(1) [[IN:%.*]], i32 7
-; OPT-NEXT: [[VAL:%.*]] = atomicrmw sub_clamp ptr addrspace(1) [[IN_GEP]], i32 2 seq_cst, align 4
+; OPT-NEXT: [[VAL:%.*]] = atomicrmw usub_sat ptr addrspace(1) [[IN_GEP]], i32 2 seq_cst, align 4
; OPT-NEXT: br label [[ENDIF]]
; OPT: endif:
; OPT-NEXT: [[X:%.*]] = phi i32 [ [[VAL]], [[IF]] ], [ 0, [[ENTRY:%.*]] ]
@@ -52,7 +52,7 @@ entry:
if:
%in.gep = getelementptr i32, ptr addrspace(1) %in, i32 7
- %val = atomicrmw sub_clamp ptr addrspace(1) %in.gep, i32 2 seq_cst
+ %val = atomicrmw usub_sat ptr addrspace(1) %in.gep, i32 2 seq_cst
br label %endif
endif:
diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx1030.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx1030.ll
index f2e889f73314e..bd0fd79bab834 100644
--- a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx1030.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx1030.ll
@@ -15,7 +15,7 @@ define amdgpu_ps float @global_csub_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GCN-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw sub_clamp ptr addrspace(1) %gep0, i32 %data seq_cst
+ %rtn = atomicrmw usub_sat ptr addrspace(1) %gep0, i32 %data seq_cst
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -31,7 +31,7 @@ define amdgpu_ps float @global_csub_saddr_i32_rtn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw sub_clamp ptr addrspace(1) %gep1, i32 %data seq_cst
+ %rtn = atomicrmw usub_sat ptr addrspace(1) %gep1, i32 %data seq_cst
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -46,7 +46,7 @@ define amdgpu_ps void @global_csub_saddr_i32_nortn(ptr addrspace(1) inreg %sbase
; GCN-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw sub_clamp ptr addrspace(1) %gep0, i32 %data seq_cst
+ %unused = atomicrmw usub_sat ptr addrspace(1) %gep0, i32 %data seq_cst
ret void
}
@@ -61,7 +61,7 @@ define amdgpu_ps void @global_csub_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw sub_clamp ptr addrspace(1) %gep1, i32 %data seq_cst
+ %unused = atomicrmw usub_sat ptr addrspace(1) %gep1, i32 %data seq_cst
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.csub.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.csub.ll
index 93ae2b6391d81..245a0abe4215d 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.csub.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.csub.ll
@@ -7,7 +7,7 @@
; GFX12PLUS: global_atomic_sub_clamp_u32 v0, v0, v1, s[0:1] th:TH_ATOMIC_RETURN
define amdgpu_kernel void @global_atomic_csub_rtn(ptr addrspace(1) %ptr, i32 %data) {
main_body:
- %ret = atomicrmw sub_clamp ptr addrspace(1) %ptr, i32 %data seq_cst
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data seq_cst
ret void
}
@@ -16,7 +16,7 @@ main_body:
; GFX12PLUS: global_atomic_sub_clamp_u32 v0, v1, s[0:1]
define amdgpu_kernel void @global_atomic_csub_no_rtn(ptr addrspace(1) %ptr, i32 %data) #0 {
main_body:
- %ret = atomicrmw sub_clamp ptr addrspace(1) %ptr, i32 %data seq_cst
+ %ret = atomicrmw usub_sat ptr addrspace(1) %ptr, i32 %data seq_cst
ret void
}
@@ -26,7 +26,7 @@ main_body:
define amdgpu_kernel void @global_atomic_csub_off4_rtn(ptr addrspace(1) %ptr, i32 %data) {
main_body:
%p = getelementptr i32, ptr addrspace(1) %ptr, i64 1
- %ret = atomicrmw sub_clamp ptr addrspace(1) %p, i32 %data seq_cst
+ %ret = atomicrmw usub_sat ptr addrspace(1) %p, i32 %data seq_cst
ret void
}
@@ -36,7 +36,7 @@ main_body:
define amdgpu_kernel void @global_atomic_csub_off4_no_rtn(ptr addrspace(1) %ptr, i32 %data) #0 {
main_body:
%p = getelementptr i32, ptr addrspace(1) %ptr, i64 1
- %ret = atomicrmw sub_clamp ptr addrspace(1) %p, i32 %data seq_cst
+ %ret = atomicrmw usub_sat ptr addrspace(1) %p, i32 %data seq_cst
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
index 3f0b6d08e36e8..c6229c22f53c5 100644
--- a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
@@ -633,8 +633,8 @@ define i32 @atomicrmw_dec_private_i32(ptr addrspace(5) %ptr) {
ret i32 %result
}
-define i32 @atomicrmw_cond_sub_private_i32(ptr addrspace(5) %ptr) {
-; IR-LABEL: define i32 @atomicrmw_cond_sub_private_i32(
+define i32 @atomicrmw_usub_cond_private_i32(ptr addrspace(5) %ptr) {
+; IR-LABEL: define i32 @atomicrmw_usub_cond_private_i32(
; IR-SAME: ptr addrspace(5) [[PTR:%.*]]) #[[ATTR0]] {
; IR-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(5) [[PTR]], align 4
; IR-NEXT: [[TMP2:%.*]] = icmp uge i32 [[TMP1]], 4
@@ -643,7 +643,7 @@ define i32 @atomicrmw_cond_sub_private_i32(ptr addrspace(5) %ptr) {
; IR-NEXT: store i32 [[NEW]], ptr addrspace(5) [[PTR]], align 4
; IR-NEXT: ret i32 [[TMP1]]
;
-; GCN-LABEL: atomicrmw_cond_sub_private_i32:
+; GCN-LABEL: atomicrmw_usub_cond_private_i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
@@ -655,12 +655,12 @@ define i32 @atomicrmw_cond_sub_private_i32(ptr addrspace(5) %ptr) {
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
- %result = atomicrmw cond_sub ptr addrspace(5) %ptr, i32 4 seq_cst
+ %result = atomicrmw usub_cond ptr addrspace(5) %ptr, i32 4 seq_cst
ret i32 %result
}
-define i32 @atomicrmw_sub_clamp_private_i32(ptr addrspace(5) %ptr) {
-; IR-LABEL: define i32 @atomicrmw_sub_clamp_private_i32(
+define i32 @atomicrmw_usub_sat_private_i32(ptr addrspace(5) %ptr) {
+; IR-LABEL: define i32 @atomicrmw_usub_sat_private_i32(
; IR-SAME: ptr addrspace(5) [[PTR:%.*]]) #[[ATTR0]] {
; IR-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(5) [[PTR]], align 4
; IR-NEXT: [[TMP2:%.*]] = icmp uge i32 [[TMP1]], 4
@@ -669,7 +669,7 @@ define i32 @atomicrmw_sub_clamp_private_i32(ptr addrspace(5) %ptr) {
; IR-NEXT: store i32 [[NEW]], ptr addrspace(5) [[PTR]], align 4
; IR-NEXT: ret i32 [[TMP1]]
;
-; GCN-LABEL: atomicrmw_sub_clamp_private_i32:
+; GCN-LABEL: atomicrmw_usub_sat_private_i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
@@ -681,6 +681,6 @@ define i32 @atomicrmw_sub_clamp_private_i32(ptr addrspace(5) %ptr) {
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
- %result = atomicrmw sub_clamp ptr addrspace(5) %ptr, i32 4 seq_cst
+ %result = atomicrmw usub_sat ptr addrspace(5) %ptr, i32 4 seq_cst
ret i32 %result
}
diff --git a/llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll b/llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll
index f948d4bd3280a..2beef9fd8e718 100644
--- a/llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl_add_ptr_csub.ll
@@ -12,7 +12,7 @@ define i32 @shl_base_atomicrmw_global_atomic_csub_ptr(ptr addrspace(1) %out, ptr
%cast = ptrtoint ptr addrspace(1) %arrayidx0 to i64
%shl = shl i64 %cast, 2
%castback = inttoptr i64 %shl to ptr addrspace(1)
- %val = atomicrmw sub_clamp ptr addrspace(1) %castback, i32 43 seq_cst
+ %val = atomicrmw usub_sat ptr addrspace(1) %castback, i32 43 seq_cst
store volatile i64 %cast, ptr addrspace(1) %extra.use, align 4
ret i32 %val
}
diff --git a/llvm/test/CodeGen/ARM/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/ARM/atomicrmw-cond-sub-clamp.ll
index 3364b2050dcc6..a3449f233d9ae 100644
--- a/llvm/test/CodeGen/ARM/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/ARM/atomicrmw-cond-sub-clamp.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 < %s | FileCheck %s
-define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i8:
+define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i8:
; CHECK: @ %bb.0:
; CHECK-NEXT: dmb ish
; CHECK-NEXT: .LBB0_1: @ %atomicrmw.start
@@ -19,12 +19,12 @@ define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: mov r0, r12
; CHECK-NEXT: dmb ish
; CHECK-NEXT: bx lr
- %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i16:
+define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i16:
; CHECK: @ %bb.0:
; CHECK-NEXT: dmb ish
; CHECK-NEXT: .LBB1_1: @ %atomicrmw.start
@@ -41,12 +41,12 @@ define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: mov r0, r12
; CHECK-NEXT: dmb ish
; CHECK-NEXT: bx lr
- %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i32:
+define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i32:
; CHECK: @ %bb.0:
; CHECK-NEXT: dmb ish
; CHECK-NEXT: .LBB2_1: @ %atomicrmw.start
@@ -61,12 +61,12 @@ define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: mov r0, r12
; CHECK-NEXT: dmb ish
; CHECK-NEXT: bx lr
- %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i64:
+define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i64:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, r5, r6, r7, r11, lr}
; CHECK-NEXT: push {r4, r5, r6, r7, r11, lr}
@@ -89,12 +89,12 @@ define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
; CHECK-NEXT: mov r1, r5
; CHECK-NEXT: dmb ish
; CHECK-NEXT: pop {r4, r5, r6, r7, r11, pc}
- %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i64 %val seq_cst
ret i64 %result
}
-define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i8:
+define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i8:
; CHECK: @ %bb.0:
; CHECK-NEXT: dmb ish
; CHECK-NEXT: .LBB4_1: @ %atomicrmw.start
@@ -111,12 +111,12 @@ define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: mov r0, r12
; CHECK-NEXT: dmb ish
; CHECK-NEXT: bx lr
- %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i16:
+define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i16:
; CHECK: @ %bb.0:
; CHECK-NEXT: dmb ish
; CHECK-NEXT: .LBB5_1: @ %atomicrmw.start
@@ -133,12 +133,12 @@ define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: mov r0, r12
; CHECK-NEXT: dmb ish
; CHECK-NEXT: bx lr
- %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i32:
+define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i32:
; CHECK: @ %bb.0:
; CHECK-NEXT: dmb ish
; CHECK-NEXT: .LBB6_1: @ %atomicrmw.start
@@ -153,12 +153,12 @@ define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: mov r0, r12
; CHECK-NEXT: dmb ish
; CHECK-NEXT: bx lr
- %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i64:
+define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i64:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, r5, r6, r7, r11, lr}
; CHECK-NEXT: push {r4, r5, r6, r7, r11, lr}
@@ -181,6 +181,6 @@ define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
; CHECK-NEXT: mov r1, r5
; CHECK-NEXT: dmb ish
; CHECK-NEXT: pop {r4, r5, r6, r7, r11, pc}
- %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i64 %val seq_cst
ret i64 %result
}
diff --git a/llvm/test/CodeGen/Hexagon/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/Hexagon/atomicrmw-cond-sub-clamp.ll
index 23d5d2f8a72e8..19a67df330e8d 100644
--- a/llvm/test/CodeGen/Hexagon/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/Hexagon/atomicrmw-cond-sub-clamp.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=hexagon < %s | FileCheck %s
-define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i8:
+define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i8:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: {
@@ -52,12 +52,12 @@ define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: r0 = r2
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
- %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i16:
+define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i16:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: {
@@ -107,12 +107,12 @@ define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: r0 = r2
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
- %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i32:
+define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i32:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: .p2align 4
@@ -137,12 +137,12 @@ define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: r0 = r2
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
- %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i64:
+define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i64:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: .p2align 4
@@ -172,12 +172,12 @@ define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
; CHECK-NEXT: r1:0 = combine(r5,r4)
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
- %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i64 %val seq_cst
ret i64 %result
}
-define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i8:
+define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i8:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: {
@@ -227,12 +227,12 @@ define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: r0 = r2
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
- %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i16:
+define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i16:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: {
@@ -282,12 +282,12 @@ define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: r0 = r2
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
- %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i32:
+define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i32:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: .p2align 4
@@ -312,12 +312,12 @@ define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: r0 = r2
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
- %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i64:
+define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i64:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: {
@@ -350,6 +350,6 @@ define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
; CHECK-NEXT: r1:0 = combine(r5,r4)
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
- %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i64 %val seq_cst
ret i64 %result
}
diff --git a/llvm/test/CodeGen/LoongArch/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/LoongArch/atomicrmw-cond-sub-clamp.ll
index 154e057c2dc57..72d4a68c3945a 100644
--- a/llvm/test/CodeGen/LoongArch/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/LoongArch/atomicrmw-cond-sub-clamp.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 < %s | FileCheck --check-prefix=LA64 %s
-define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
-; LA64-LABEL: atomicrmw_cond_sub_i8:
+define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
+; LA64-LABEL: atomicrmw_usub_cond_i8:
; LA64: # %bb.0:
; LA64-NEXT: slli.d $a4, $a0, 3
; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
@@ -49,12 +49,12 @@ define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
; LA64-NEXT: # %bb.2: # %atomicrmw.end
; LA64-NEXT: srl.w $a0, $a3, $a2
; LA64-NEXT: ret
- %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
-; LA64-LABEL: atomicrmw_cond_sub_i16:
+define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
+; LA64-LABEL: atomicrmw_usub_cond_i16:
; LA64: # %bb.0:
; LA64-NEXT: slli.d $a4, $a0, 3
; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
@@ -102,12 +102,12 @@ define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
; LA64-NEXT: # %bb.2: # %atomicrmw.end
; LA64-NEXT: srl.w $a0, $a3, $a2
; LA64-NEXT: ret
- %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
-; LA64-LABEL: atomicrmw_cond_sub_i32:
+define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
+; LA64-LABEL: atomicrmw_usub_cond_i32:
; LA64: # %bb.0:
; LA64-NEXT: ld.w $a2, $a0, 0
; LA64-NEXT: addi.w $a3, $a1, 0
@@ -142,12 +142,12 @@ define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
; LA64-NEXT: # %bb.2: # %atomicrmw.end
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
- %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
-; LA64-LABEL: atomicrmw_cond_sub_i64:
+define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
+; LA64-LABEL: atomicrmw_usub_cond_i64:
; LA64: # %bb.0:
; LA64-NEXT: ld.d $a2, $a0, 0
; LA64-NEXT: .p2align 4, , 16
@@ -181,12 +181,12 @@ define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
; LA64-NEXT: # %bb.2: # %atomicrmw.end
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
- %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i64 %val seq_cst
ret i64 %result
}
-define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
-; LA64-LABEL: atomicrmw_sub_clamp_i8:
+define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
+; LA64-LABEL: atomicrmw_usub_sat_i8:
; LA64: # %bb.0:
; LA64-NEXT: slli.d $a4, $a0, 3
; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
@@ -231,12 +231,12 @@ define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
; LA64-NEXT: # %bb.2: # %atomicrmw.end
; LA64-NEXT: srl.w $a0, $a3, $a2
; LA64-NEXT: ret
- %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
-; LA64-LABEL: atomicrmw_sub_clamp_i16:
+define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
+; LA64-LABEL: atomicrmw_usub_sat_i16:
; LA64: # %bb.0:
; LA64-NEXT: slli.d $a4, $a0, 3
; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
@@ -282,12 +282,12 @@ define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
; LA64-NEXT: # %bb.2: # %atomicrmw.end
; LA64-NEXT: srl.w $a0, $a3, $a2
; LA64-NEXT: ret
- %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
-; LA64-LABEL: atomicrmw_sub_clamp_i32:
+define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
+; LA64-LABEL: atomicrmw_usub_sat_i32:
; LA64: # %bb.0:
; LA64-NEXT: ld.w $a2, $a0, 0
; LA64-NEXT: addi.w $a3, $a1, 0
@@ -320,12 +320,12 @@ define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
; LA64-NEXT: # %bb.2: # %atomicrmw.end
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
- %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
-; LA64-LABEL: atomicrmw_sub_clamp_i64:
+define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
+; LA64-LABEL: atomicrmw_usub_sat_i64:
; LA64: # %bb.0:
; LA64-NEXT: ld.d $a2, $a0, 0
; LA64-NEXT: .p2align 4, , 16
@@ -357,6 +357,6 @@ define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
; LA64-NEXT: # %bb.2: # %atomicrmw.end
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
- %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i64 %val seq_cst
ret i64 %result
}
diff --git a/llvm/test/CodeGen/PowerPC/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/PowerPC/atomicrmw-cond-sub-clamp.ll
index 4ed8ff788597b..e9d598092411f 100644
--- a/llvm/test/CodeGen/PowerPC/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/PowerPC/atomicrmw-cond-sub-clamp.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s
-define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i8:
+define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: mr 5, 3
@@ -54,12 +54,12 @@ define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: .LBB0_8: # %atomicrmw.end
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
- %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i16:
+define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: mr 5, 3
@@ -112,12 +112,12 @@ define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: .LBB1_8: # %atomicrmw.end
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
- %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i32:
+define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: lwz 6, 0(3)
@@ -153,12 +153,12 @@ define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
- %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i64:
+define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: ld 6, 0(3)
@@ -194,12 +194,12 @@ define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
- %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i64 %val seq_cst
ret i64 %result
}
-define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i8:
+define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: mr 5, 3
@@ -251,12 +251,12 @@ define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: .LBB4_8: # %atomicrmw.end
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
- %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i16:
+define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: mr 5, 3
@@ -309,12 +309,12 @@ define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: .LBB5_8: # %atomicrmw.end
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
- %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i32:
+define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: lwz 6, 0(3)
@@ -350,12 +350,12 @@ define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
- %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i64:
+define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: ld 6, 0(3)
@@ -391,6 +391,6 @@ define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
- %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i64 %val seq_cst
ret i64 %result
}
diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll
index 45f8120e0d10b..84a763eb68f0c 100644
--- a/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll
@@ -13,8 +13,8 @@
; RUN: | FileCheck -check-prefix=RV64IA %s
-define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
-; RV32I-LABEL: atomicrmw_cond_sub_i8:
+define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
+; RV32I-LABEL: atomicrmw_usub_cond_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: .cfi_def_cfa_offset 32
@@ -60,7 +60,7 @@ define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
-; RV32IA-LABEL: atomicrmw_cond_sub_i8:
+; RV32IA-LABEL: atomicrmw_usub_cond_i8:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a3, a0, 3
@@ -106,7 +106,7 @@ define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
; RV32IA-NEXT: srl a0, a6, a0
; RV32IA-NEXT: ret
;
-; RV64I-LABEL: atomicrmw_cond_sub_i8:
+; RV64I-LABEL: atomicrmw_usub_cond_i8:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: .cfi_def_cfa_offset 48
@@ -152,7 +152,7 @@ define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
-; RV64IA-LABEL: atomicrmw_cond_sub_i8:
+; RV64IA-LABEL: atomicrmw_usub_cond_i8:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a4, a0, 3
@@ -197,12 +197,12 @@ define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
; RV64IA-NEXT: .LBB0_5: # %atomicrmw.end
; RV64IA-NEXT: srlw a0, a3, a0
; RV64IA-NEXT: ret
- %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
-; RV32I-LABEL: atomicrmw_cond_sub_i16:
+define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
+; RV32I-LABEL: atomicrmw_usub_cond_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: .cfi_def_cfa_offset 32
@@ -253,7 +253,7 @@ define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
-; RV32IA-LABEL: atomicrmw_cond_sub_i16:
+; RV32IA-LABEL: atomicrmw_usub_cond_i16:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a4, a0, 3
@@ -300,7 +300,7 @@ define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
; RV32IA-NEXT: srl a0, a7, a0
; RV32IA-NEXT: ret
;
-; RV64I-LABEL: atomicrmw_cond_sub_i16:
+; RV64I-LABEL: atomicrmw_usub_cond_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: .cfi_def_cfa_offset 48
@@ -351,7 +351,7 @@ define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
-; RV64IA-LABEL: atomicrmw_cond_sub_i16:
+; RV64IA-LABEL: atomicrmw_usub_cond_i16:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a5, a0, 3
@@ -397,12 +397,12 @@ define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
; RV64IA-NEXT: .LBB1_5: # %atomicrmw.end
; RV64IA-NEXT: srlw a0, a4, a0
; RV64IA-NEXT: ret
- %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
-; RV32I-LABEL: atomicrmw_cond_sub_i32:
+define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
+; RV32I-LABEL: atomicrmw_usub_cond_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: .cfi_def_cfa_offset 16
@@ -443,7 +443,7 @@ define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
-; RV32IA-LABEL: atomicrmw_cond_sub_i32:
+; RV32IA-LABEL: atomicrmw_usub_cond_i32:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lw a2, 0(a0)
; RV32IA-NEXT: j .LBB2_2
@@ -482,7 +482,7 @@ define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
; RV32IA-NEXT: mv a0, a2
; RV32IA-NEXT: ret
;
-; RV64I-LABEL: atomicrmw_cond_sub_i32:
+; RV64I-LABEL: atomicrmw_usub_cond_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: .cfi_def_cfa_offset 48
@@ -527,7 +527,7 @@ define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
-; RV64IA-LABEL: atomicrmw_cond_sub_i32:
+; RV64IA-LABEL: atomicrmw_usub_cond_i32:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lw a2, 0(a0)
; RV64IA-NEXT: sext.w a3, a1
@@ -566,12 +566,12 @@ define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
; RV64IA-NEXT: .LBB2_4: # %atomicrmw.end
; RV64IA-NEXT: mv a0, a2
; RV64IA-NEXT: ret
- %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
-; RV32I-LABEL: atomicrmw_cond_sub_i64:
+define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
+; RV32I-LABEL: atomicrmw_usub_cond_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: .cfi_def_cfa_offset 32
@@ -631,7 +631,7 @@ define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
-; RV32IA-LABEL: atomicrmw_cond_sub_i64:
+; RV32IA-LABEL: atomicrmw_usub_cond_i64:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
; RV32IA-NEXT: .cfi_def_cfa_offset 32
@@ -691,7 +691,7 @@ define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
; RV32IA-NEXT: addi sp, sp, 32
; RV32IA-NEXT: ret
;
-; RV64I-LABEL: atomicrmw_cond_sub_i64:
+; RV64I-LABEL: atomicrmw_usub_cond_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: .cfi_def_cfa_offset 32
@@ -732,7 +732,7 @@ define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
-; RV64IA-LABEL: atomicrmw_cond_sub_i64:
+; RV64IA-LABEL: atomicrmw_usub_cond_i64:
; RV64IA: # %bb.0:
; RV64IA-NEXT: ld a2, 0(a0)
; RV64IA-NEXT: j .LBB3_2
@@ -770,12 +770,12 @@ define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
; RV64IA-NEXT: .LBB3_4: # %atomicrmw.end
; RV64IA-NEXT: mv a0, a2
; RV64IA-NEXT: ret
- %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i64 %val seq_cst
ret i64 %result
}
-define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
-; RV32I-LABEL: atomicrmw_sub_clamp_i8:
+define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
+; RV32I-LABEL: atomicrmw_usub_sat_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: .cfi_def_cfa_offset 32
@@ -815,7 +815,7 @@ define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
-; RV32IA-LABEL: atomicrmw_sub_clamp_i8:
+; RV32IA-LABEL: atomicrmw_usub_sat_i8:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a3, a0, 3
@@ -853,7 +853,7 @@ define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
; RV32IA-NEXT: srl a0, a5, a0
; RV32IA-NEXT: ret
;
-; RV64I-LABEL: atomicrmw_sub_clamp_i8:
+; RV64I-LABEL: atomicrmw_usub_sat_i8:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: .cfi_def_cfa_offset 48
@@ -893,7 +893,7 @@ define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
-; RV64IA-LABEL: atomicrmw_sub_clamp_i8:
+; RV64IA-LABEL: atomicrmw_usub_sat_i8:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a4, a0, 3
@@ -930,12 +930,12 @@ define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
; RV64IA-NEXT: # %bb.2: # %atomicrmw.end
; RV64IA-NEXT: srlw a0, a3, a0
; RV64IA-NEXT: ret
- %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
-; RV32I-LABEL: atomicrmw_sub_clamp_i16:
+define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
+; RV32I-LABEL: atomicrmw_usub_sat_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: .cfi_def_cfa_offset 32
@@ -980,7 +980,7 @@ define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
-; RV32IA-LABEL: atomicrmw_sub_clamp_i16:
+; RV32IA-LABEL: atomicrmw_usub_sat_i16:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a4, a0, 3
@@ -1019,7 +1019,7 @@ define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
; RV32IA-NEXT: srl a0, a6, a0
; RV32IA-NEXT: ret
;
-; RV64I-LABEL: atomicrmw_sub_clamp_i16:
+; RV64I-LABEL: atomicrmw_usub_sat_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: .cfi_def_cfa_offset 48
@@ -1064,7 +1064,7 @@ define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
-; RV64IA-LABEL: atomicrmw_sub_clamp_i16:
+; RV64IA-LABEL: atomicrmw_usub_sat_i16:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a5, a0, 3
@@ -1102,12 +1102,12 @@ define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
; RV64IA-NEXT: # %bb.2: # %atomicrmw.end
; RV64IA-NEXT: srlw a0, a4, a0
; RV64IA-NEXT: ret
- %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
-; RV32I-LABEL: atomicrmw_sub_clamp_i32:
+define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
+; RV32I-LABEL: atomicrmw_usub_sat_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: .cfi_def_cfa_offset 16
@@ -1142,7 +1142,7 @@ define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
-; RV32IA-LABEL: atomicrmw_sub_clamp_i32:
+; RV32IA-LABEL: atomicrmw_usub_sat_i32:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lw a2, 0(a0)
; RV32IA-NEXT: .LBB6_1: # %atomicrmw.start
@@ -1167,7 +1167,7 @@ define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
; RV32IA-NEXT: mv a0, a2
; RV32IA-NEXT: ret
;
-; RV64I-LABEL: atomicrmw_sub_clamp_i32:
+; RV64I-LABEL: atomicrmw_usub_sat_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: .cfi_def_cfa_offset 48
@@ -1206,7 +1206,7 @@ define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
-; RV64IA-LABEL: atomicrmw_sub_clamp_i32:
+; RV64IA-LABEL: atomicrmw_usub_sat_i32:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lw a2, 0(a0)
; RV64IA-NEXT: sext.w a3, a1
@@ -1231,12 +1231,12 @@ define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
; RV64IA-NEXT: # %bb.2: # %atomicrmw.end
; RV64IA-NEXT: mv a0, a2
; RV64IA-NEXT: ret
- %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
-; RV32I-LABEL: atomicrmw_sub_clamp_i64:
+define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
+; RV32I-LABEL: atomicrmw_usub_sat_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: .cfi_def_cfa_offset 32
@@ -1292,7 +1292,7 @@ define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
-; RV32IA-LABEL: atomicrmw_sub_clamp_i64:
+; RV32IA-LABEL: atomicrmw_usub_sat_i64:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
; RV32IA-NEXT: .cfi_def_cfa_offset 32
@@ -1348,7 +1348,7 @@ define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
; RV32IA-NEXT: addi sp, sp, 32
; RV32IA-NEXT: ret
;
-; RV64I-LABEL: atomicrmw_sub_clamp_i64:
+; RV64I-LABEL: atomicrmw_usub_sat_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: .cfi_def_cfa_offset 32
@@ -1383,7 +1383,7 @@ define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
-; RV64IA-LABEL: atomicrmw_sub_clamp_i64:
+; RV64IA-LABEL: atomicrmw_usub_sat_i64:
; RV64IA: # %bb.0:
; RV64IA-NEXT: ld a2, 0(a0)
; RV64IA-NEXT: .LBB7_1: # %atomicrmw.start
@@ -1407,6 +1407,6 @@ define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
; RV64IA-NEXT: # %bb.2: # %atomicrmw.end
; RV64IA-NEXT: mv a0, a2
; RV64IA-NEXT: ret
- %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i64 %val seq_cst
ret i64 %result
}
diff --git a/llvm/test/CodeGen/SPARC/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/SPARC/atomicrmw-cond-sub-clamp.ll
index ec8ad74d69479..e092facdc5fea 100644
--- a/llvm/test/CodeGen/SPARC/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/SPARC/atomicrmw-cond-sub-clamp.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=sparc -mcpu=v9 < %s | FileCheck %s
-define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i8:
+define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i8:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
@@ -40,12 +40,12 @@ define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
; CHECK-NEXT: retl
; CHECK-NEXT: nop
- %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i16:
+define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i16:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
@@ -84,12 +84,12 @@ define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
; CHECK-NEXT: retl
; CHECK-NEXT: nop
- %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i32:
+define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i32:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
@@ -112,12 +112,12 @@ define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
; CHECK-NEXT: retl
; CHECK-NEXT: mov %o2, %o0
- %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i64:
+define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i64:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
; CHECK-NEXT: save %sp, -104, %sp
@@ -157,12 +157,12 @@ define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
; CHECK-NEXT: mov %g2, %i0
; CHECK-NEXT: ret
; CHECK-NEXT: restore %g0, %g3, %o1
- %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i64 %val seq_cst
ret i64 %result
}
-define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i8:
+define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i8:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
@@ -200,12 +200,12 @@ define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
; CHECK-NEXT: retl
; CHECK-NEXT: nop
- %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i16:
+define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i16:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
; CHECK-NEXT: save %sp, -96, %sp
@@ -248,12 +248,12 @@ define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
; CHECK-NEXT: ret
; CHECK-NEXT: restore
- %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i32:
+define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i32:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
@@ -276,12 +276,12 @@ define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
; CHECK-NEXT: retl
; CHECK-NEXT: mov %o2, %o0
- %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i64:
+define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i64:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
; CHECK-NEXT: save %sp, -104, %sp
@@ -321,6 +321,6 @@ define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
; CHECK-NEXT: mov %g2, %i0
; CHECK-NEXT: ret
; CHECK-NEXT: restore %g0, %g3, %o1
- %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i64 %val seq_cst
ret i64 %result
}
diff --git a/llvm/test/CodeGen/VE/Scalar/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/VE/Scalar/atomicrmw-cond-sub-clamp.ll
index 85774dde7bb39..58316c8032607 100644
--- a/llvm/test/CodeGen/VE/Scalar/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/VE/Scalar/atomicrmw-cond-sub-clamp.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=ve-unknown-unknown < %s | FileCheck %s
-define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i8:
+define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s1, %s1, (32)0
; CHECK-NEXT: fencem 3
@@ -35,12 +35,12 @@ define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: srl %s0, %s1, %s0
; CHECK-NEXT: fencem 3
; CHECK-NEXT: b.l.t (, %s10)
- %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i16:
+define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s1, %s1, (32)0
; CHECK-NEXT: fencem 3
@@ -73,12 +73,12 @@ define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: srl %s0, %s1, %s0
; CHECK-NEXT: fencem 3
; CHECK-NEXT: b.l.t (, %s10)
- %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i32:
+define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: fencem 3
; CHECK-NEXT: ldl.sx %s2, (, %s0)
@@ -96,12 +96,12 @@ define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: fencem 3
; CHECK-NEXT: or %s0, 0, %s2
; CHECK-NEXT: b.l.t (, %s10)
- %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i64:
+define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_usub_cond_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: fencem 3
; CHECK-NEXT: ld %s2, (, %s0)
@@ -118,12 +118,12 @@ define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
; CHECK-NEXT: fencem 3
; CHECK-NEXT: or %s0, 0, %s2
; CHECK-NEXT: b.l.t (, %s10)
- %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i64 %val seq_cst
ret i64 %result
}
-define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i8:
+define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s1, %s1, (32)0
; CHECK-NEXT: fencem 3
@@ -155,12 +155,12 @@ define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: srl %s0, %s1, %s0
; CHECK-NEXT: fencem 3
; CHECK-NEXT: b.l.t (, %s10)
- %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i16:
+define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s1, %s1, (32)0
; CHECK-NEXT: fencem 3
@@ -192,12 +192,12 @@ define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: srl %s0, %s1, %s0
; CHECK-NEXT: fencem 3
; CHECK-NEXT: b.l.t (, %s10)
- %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i32:
+define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: fencem 3
; CHECK-NEXT: ldl.sx %s2, (, %s0)
@@ -214,12 +214,12 @@ define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: fencem 3
; CHECK-NEXT: or %s0, 0, %s2
; CHECK-NEXT: b.l.t (, %s10)
- %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i64:
+define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
+; CHECK-LABEL: atomicrmw_usub_sat_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: fencem 3
; CHECK-NEXT: ld %s2, (, %s0)
@@ -235,6 +235,6 @@ define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
; CHECK-NEXT: fencem 3
; CHECK-NEXT: or %s0, 0, %s2
; CHECK-NEXT: b.l.t (, %s10)
- %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i64 %val seq_cst
ret i64 %result
}
diff --git a/llvm/test/CodeGen/WebAssembly/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/WebAssembly/atomicrmw-cond-sub-clamp.ll
index e634b332c588b..3c105fcff09a9 100644
--- a/llvm/test/CodeGen/WebAssembly/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/WebAssembly/atomicrmw-cond-sub-clamp.ll
@@ -2,9 +2,9 @@
; RUN: llc -mtriple=wasm32-unknown-unknown < %s | FileCheck -check-prefix=WASM32 %s
; RUN: llc -mtriple=wasm64-unknown-unknown < %s | FileCheck -check-prefix=WASM64 %s
-define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
-; WASM32-LABEL: atomicrmw_cond_sub_i8:
-; WASM32: .functype atomicrmw_cond_sub_i8 (i32, i32) -> (i32)
+define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
+; WASM32-LABEL: atomicrmw_usub_cond_i8:
+; WASM32: .functype atomicrmw_usub_cond_i8 (i32, i32) -> (i32)
; WASM32-NEXT: .local i32
; WASM32-NEXT: # %bb.0:
; WASM32-NEXT: local.get 0
@@ -24,8 +24,8 @@ define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
; WASM32-NEXT: local.get 2
; WASM32-NEXT: # fallthrough-return
;
-; WASM64-LABEL: atomicrmw_cond_sub_i8:
-; WASM64: .functype atomicrmw_cond_sub_i8 (i64, i32) -> (i32)
+; WASM64-LABEL: atomicrmw_usub_cond_i8:
+; WASM64: .functype atomicrmw_usub_cond_i8 (i64, i32) -> (i32)
; WASM64-NEXT: .local i32
; WASM64-NEXT: # %bb.0:
; WASM64-NEXT: local.get 0
@@ -44,13 +44,13 @@ define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
; WASM64-NEXT: i32.store8 0
; WASM64-NEXT: local.get 2
; WASM64-NEXT: # fallthrough-return
- %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
-; WASM32-LABEL: atomicrmw_cond_sub_i16:
-; WASM32: .functype atomicrmw_cond_sub_i16 (i32, i32) -> (i32)
+define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
+; WASM32-LABEL: atomicrmw_usub_cond_i16:
+; WASM32: .functype atomicrmw_usub_cond_i16 (i32, i32) -> (i32)
; WASM32-NEXT: .local i32
; WASM32-NEXT: # %bb.0:
; WASM32-NEXT: local.get 0
@@ -70,8 +70,8 @@ define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
; WASM32-NEXT: local.get 2
; WASM32-NEXT: # fallthrough-return
;
-; WASM64-LABEL: atomicrmw_cond_sub_i16:
-; WASM64: .functype atomicrmw_cond_sub_i16 (i64, i32) -> (i32)
+; WASM64-LABEL: atomicrmw_usub_cond_i16:
+; WASM64: .functype atomicrmw_usub_cond_i16 (i64, i32) -> (i32)
; WASM64-NEXT: .local i32
; WASM64-NEXT: # %bb.0:
; WASM64-NEXT: local.get 0
@@ -90,13 +90,13 @@ define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
; WASM64-NEXT: i32.store16 0
; WASM64-NEXT: local.get 2
; WASM64-NEXT: # fallthrough-return
- %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
-; WASM32-LABEL: atomicrmw_cond_sub_i32:
-; WASM32: .functype atomicrmw_cond_sub_i32 (i32, i32) -> (i32)
+define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
+; WASM32-LABEL: atomicrmw_usub_cond_i32:
+; WASM32: .functype atomicrmw_usub_cond_i32 (i32, i32) -> (i32)
; WASM32-NEXT: .local i32
; WASM32-NEXT: # %bb.0:
; WASM32-NEXT: local.get 0
@@ -114,8 +114,8 @@ define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
; WASM32-NEXT: local.get 2
; WASM32-NEXT: # fallthrough-return
;
-; WASM64-LABEL: atomicrmw_cond_sub_i32:
-; WASM64: .functype atomicrmw_cond_sub_i32 (i64, i32) -> (i32)
+; WASM64-LABEL: atomicrmw_usub_cond_i32:
+; WASM64: .functype atomicrmw_usub_cond_i32 (i64, i32) -> (i32)
; WASM64-NEXT: .local i32
; WASM64-NEXT: # %bb.0:
; WASM64-NEXT: local.get 0
@@ -132,13 +132,13 @@ define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
; WASM64-NEXT: i32.store 0
; WASM64-NEXT: local.get 2
; WASM64-NEXT: # fallthrough-return
- %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
-; WASM32-LABEL: atomicrmw_cond_sub_i64:
-; WASM32: .functype atomicrmw_cond_sub_i64 (i32, i64) -> (i64)
+define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
+; WASM32-LABEL: atomicrmw_usub_cond_i64:
+; WASM32: .functype atomicrmw_usub_cond_i64 (i32, i64) -> (i64)
; WASM32-NEXT: .local i64
; WASM32-NEXT: # %bb.0:
; WASM32-NEXT: local.get 0
@@ -156,8 +156,8 @@ define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
; WASM32-NEXT: local.get 2
; WASM32-NEXT: # fallthrough-return
;
-; WASM64-LABEL: atomicrmw_cond_sub_i64:
-; WASM64: .functype atomicrmw_cond_sub_i64 (i64, i64) -> (i64)
+; WASM64-LABEL: atomicrmw_usub_cond_i64:
+; WASM64: .functype atomicrmw_usub_cond_i64 (i64, i64) -> (i64)
; WASM64-NEXT: .local i64
; WASM64-NEXT: # %bb.0:
; WASM64-NEXT: local.get 0
@@ -174,13 +174,13 @@ define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
; WASM64-NEXT: i64.store 0
; WASM64-NEXT: local.get 2
; WASM64-NEXT: # fallthrough-return
- %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_cond ptr %ptr, i64 %val seq_cst
ret i64 %result
}
-define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
-; WASM32-LABEL: atomicrmw_sub_clamp_i8:
-; WASM32: .functype atomicrmw_sub_clamp_i8 (i32, i32) -> (i32)
+define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
+; WASM32-LABEL: atomicrmw_usub_sat_i8:
+; WASM32: .functype atomicrmw_usub_sat_i8 (i32, i32) -> (i32)
; WASM32-NEXT: .local i32
; WASM32-NEXT: # %bb.0:
; WASM32-NEXT: local.get 0
@@ -200,8 +200,8 @@ define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
; WASM32-NEXT: local.get 2
; WASM32-NEXT: # fallthrough-return
;
-; WASM64-LABEL: atomicrmw_sub_clamp_i8:
-; WASM64: .functype atomicrmw_sub_clamp_i8 (i64, i32) -> (i32)
+; WASM64-LABEL: atomicrmw_usub_sat_i8:
+; WASM64: .functype atomicrmw_usub_sat_i8 (i64, i32) -> (i32)
; WASM64-NEXT: .local i32
; WASM64-NEXT: # %bb.0:
; WASM64-NEXT: local.get 0
@@ -220,13 +220,13 @@ define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
; WASM64-NEXT: i32.store8 0
; WASM64-NEXT: local.get 2
; WASM64-NEXT: # fallthrough-return
- %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
-; WASM32-LABEL: atomicrmw_sub_clamp_i16:
-; WASM32: .functype atomicrmw_sub_clamp_i16 (i32, i32) -> (i32)
+define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
+; WASM32-LABEL: atomicrmw_usub_sat_i16:
+; WASM32: .functype atomicrmw_usub_sat_i16 (i32, i32) -> (i32)
; WASM32-NEXT: .local i32
; WASM32-NEXT: # %bb.0:
; WASM32-NEXT: local.get 0
@@ -246,8 +246,8 @@ define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
; WASM32-NEXT: local.get 2
; WASM32-NEXT: # fallthrough-return
;
-; WASM64-LABEL: atomicrmw_sub_clamp_i16:
-; WASM64: .functype atomicrmw_sub_clamp_i16 (i64, i32) -> (i32)
+; WASM64-LABEL: atomicrmw_usub_sat_i16:
+; WASM64: .functype atomicrmw_usub_sat_i16 (i64, i32) -> (i32)
; WASM64-NEXT: .local i32
; WASM64-NEXT: # %bb.0:
; WASM64-NEXT: local.get 0
@@ -266,13 +266,13 @@ define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
; WASM64-NEXT: i32.store16 0
; WASM64-NEXT: local.get 2
; WASM64-NEXT: # fallthrough-return
- %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
-; WASM32-LABEL: atomicrmw_sub_clamp_i32:
-; WASM32: .functype atomicrmw_sub_clamp_i32 (i32, i32) -> (i32)
+define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
+; WASM32-LABEL: atomicrmw_usub_sat_i32:
+; WASM32: .functype atomicrmw_usub_sat_i32 (i32, i32) -> (i32)
; WASM32-NEXT: .local i32
; WASM32-NEXT: # %bb.0:
; WASM32-NEXT: local.get 0
@@ -290,8 +290,8 @@ define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
; WASM32-NEXT: local.get 2
; WASM32-NEXT: # fallthrough-return
;
-; WASM64-LABEL: atomicrmw_sub_clamp_i32:
-; WASM64: .functype atomicrmw_sub_clamp_i32 (i64, i32) -> (i32)
+; WASM64-LABEL: atomicrmw_usub_sat_i32:
+; WASM64: .functype atomicrmw_usub_sat_i32 (i64, i32) -> (i32)
; WASM64-NEXT: .local i32
; WASM64-NEXT: # %bb.0:
; WASM64-NEXT: local.get 0
@@ -308,13 +308,13 @@ define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
; WASM64-NEXT: i32.store 0
; WASM64-NEXT: local.get 2
; WASM64-NEXT: # fallthrough-return
- %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
-; WASM32-LABEL: atomicrmw_sub_clamp_i64:
-; WASM32: .functype atomicrmw_sub_clamp_i64 (i32, i64) -> (i64)
+define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
+; WASM32-LABEL: atomicrmw_usub_sat_i64:
+; WASM32: .functype atomicrmw_usub_sat_i64 (i32, i64) -> (i64)
; WASM32-NEXT: .local i64
; WASM32-NEXT: # %bb.0:
; WASM32-NEXT: local.get 0
@@ -332,8 +332,8 @@ define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
; WASM32-NEXT: local.get 2
; WASM32-NEXT: # fallthrough-return
;
-; WASM64-LABEL: atomicrmw_sub_clamp_i64:
-; WASM64: .functype atomicrmw_sub_clamp_i64 (i64, i64) -> (i64)
+; WASM64-LABEL: atomicrmw_usub_sat_i64:
+; WASM64: .functype atomicrmw_usub_sat_i64 (i64, i64) -> (i64)
; WASM64-NEXT: .local i64
; WASM64-NEXT: # %bb.0:
; WASM64-NEXT: local.get 0
@@ -350,6 +350,6 @@ define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
; WASM64-NEXT: i64.store 0
; WASM64-NEXT: local.get 2
; WASM64-NEXT: # fallthrough-return
- %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+ %result = atomicrmw usub_sat ptr %ptr, i64 %val seq_cst
ret i64 %result
}
diff --git a/llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll
index 5c49107e0fdef..ada8c9fff0d11 100644
--- a/llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll
@@ -1,153 +1,413 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple x86_64-pc-linux < %s | FileCheck %s
+; RUN: llc -mtriple i686-pc-linux < %s | FileCheck %s --check-prefix=CHECK-32
+; RUN: llc -mtriple x86_64-pc-linux < %s | FileCheck %s --check-prefix=CHECK-64
-define i8 @atomicrmw_cond_sub_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movzbl (%rdi), %eax
-; CHECK-NEXT: movzbl %sil, %ecx
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB0_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %eax, %edx
-; CHECK-NEXT: subb %cl, %dl
-; CHECK-NEXT: movzbl %dl, %edx
-; CHECK-NEXT: cmovbl %ecx, %edx
-; CHECK-NEXT: lock cmpxchgb %dl, (%rdi)
-; CHECK-NEXT: jne .LBB0_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: retq
- %result = atomicrmw cond_sub ptr %ptr, i8 %val seq_cst
+define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
+; CHECK-32-LABEL: atomicrmw_usub_cond_i8:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-32-NEXT: movzbl (%edx), %eax
+; CHECK-32-NEXT: jmp .LBB0_1
+; CHECK-32-NEXT: .p2align 4, 0x90
+; CHECK-32-NEXT: .LBB0_3: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB0_1 Depth=1
+; CHECK-32-NEXT: lock cmpxchgb %ah, (%edx)
+; CHECK-32-NEXT: je .LBB0_4
+; CHECK-32-NEXT: .LBB0_1: # %atomicrmw.start
+; CHECK-32-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-32-NEXT: movb %al, %ah
+; CHECK-32-NEXT: subb %cl, %ah
+; CHECK-32-NEXT: jae .LBB0_3
+; CHECK-32-NEXT: # %bb.2: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB0_1 Depth=1
+; CHECK-32-NEXT: movb %cl, %ah
+; CHECK-32-NEXT: jmp .LBB0_3
+; CHECK-32-NEXT: .LBB0_4: # %atomicrmw.end
+; CHECK-32-NEXT: retl
+;
+; CHECK-64-LABEL: atomicrmw_usub_cond_i8:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: movzbl (%rdi), %eax
+; CHECK-64-NEXT: movzbl %sil, %ecx
+; CHECK-64-NEXT: .p2align 4, 0x90
+; CHECK-64-NEXT: .LBB0_1: # %atomicrmw.start
+; CHECK-64-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-64-NEXT: movl %eax, %edx
+; CHECK-64-NEXT: subb %cl, %dl
+; CHECK-64-NEXT: movzbl %dl, %edx
+; CHECK-64-NEXT: cmovbl %ecx, %edx
+; CHECK-64-NEXT: lock cmpxchgb %dl, (%rdi)
+; CHECK-64-NEXT: jne .LBB0_1
+; CHECK-64-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-64-NEXT: retq
+ %result = atomicrmw usub_cond ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_cond_sub_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movzwl (%rdi), %eax
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB1_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: subw %si, %cx
-; CHECK-NEXT: cmovbl %esi, %ecx
-; CHECK-NEXT: lock cmpxchgw %cx, (%rdi)
-; CHECK-NEXT: jne .LBB1_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: retq
- %result = atomicrmw cond_sub ptr %ptr, i16 %val seq_cst
+define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
+; CHECK-32-LABEL: atomicrmw_usub_cond_i16:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: pushl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: .cfi_offset %esi, -8
+; CHECK-32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-32-NEXT: movzwl (%edx), %eax
+; CHECK-32-NEXT: jmp .LBB1_1
+; CHECK-32-NEXT: .p2align 4, 0x90
+; CHECK-32-NEXT: .LBB1_3: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB1_1 Depth=1
+; CHECK-32-NEXT: lock cmpxchgw %si, (%edx)
+; CHECK-32-NEXT: je .LBB1_4
+; CHECK-32-NEXT: .LBB1_1: # %atomicrmw.start
+; CHECK-32-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-32-NEXT: movl %eax, %esi
+; CHECK-32-NEXT: subw %cx, %si
+; CHECK-32-NEXT: jae .LBB1_3
+; CHECK-32-NEXT: # %bb.2: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB1_1 Depth=1
+; CHECK-32-NEXT: movl %ecx, %esi
+; CHECK-32-NEXT: jmp .LBB1_3
+; CHECK-32-NEXT: .LBB1_4: # %atomicrmw.end
+; CHECK-32-NEXT: popl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 4
+; CHECK-32-NEXT: retl
+;
+; CHECK-64-LABEL: atomicrmw_usub_cond_i16:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: movzwl (%rdi), %eax
+; CHECK-64-NEXT: .p2align 4, 0x90
+; CHECK-64-NEXT: .LBB1_1: # %atomicrmw.start
+; CHECK-64-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-64-NEXT: movl %eax, %ecx
+; CHECK-64-NEXT: subw %si, %cx
+; CHECK-64-NEXT: cmovbl %esi, %ecx
+; CHECK-64-NEXT: lock cmpxchgw %cx, (%rdi)
+; CHECK-64-NEXT: jne .LBB1_1
+; CHECK-64-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-64-NEXT: retq
+ %result = atomicrmw usub_cond ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_cond_sub_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl (%rdi), %eax
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB2_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: subl %esi, %ecx
-; CHECK-NEXT: cmovbl %esi, %ecx
-; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
-; CHECK-NEXT: jne .LBB2_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: retq
- %result = atomicrmw cond_sub ptr %ptr, i32 %val seq_cst
+define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
+; CHECK-32-LABEL: atomicrmw_usub_cond_i32:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: pushl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: .cfi_offset %esi, -8
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-32-NEXT: movl (%edx), %eax
+; CHECK-32-NEXT: jmp .LBB2_1
+; CHECK-32-NEXT: .p2align 4, 0x90
+; CHECK-32-NEXT: .LBB2_3: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB2_1 Depth=1
+; CHECK-32-NEXT: lock cmpxchgl %esi, (%edx)
+; CHECK-32-NEXT: je .LBB2_4
+; CHECK-32-NEXT: .LBB2_1: # %atomicrmw.start
+; CHECK-32-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-32-NEXT: movl %eax, %esi
+; CHECK-32-NEXT: subl %ecx, %esi
+; CHECK-32-NEXT: jae .LBB2_3
+; CHECK-32-NEXT: # %bb.2: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB2_1 Depth=1
+; CHECK-32-NEXT: movl %ecx, %esi
+; CHECK-32-NEXT: jmp .LBB2_3
+; CHECK-32-NEXT: .LBB2_4: # %atomicrmw.end
+; CHECK-32-NEXT: popl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 4
+; CHECK-32-NEXT: retl
+;
+; CHECK-64-LABEL: atomicrmw_usub_cond_i32:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: movl (%rdi), %eax
+; CHECK-64-NEXT: .p2align 4, 0x90
+; CHECK-64-NEXT: .LBB2_1: # %atomicrmw.start
+; CHECK-64-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-64-NEXT: movl %eax, %ecx
+; CHECK-64-NEXT: subl %esi, %ecx
+; CHECK-64-NEXT: cmovbl %esi, %ecx
+; CHECK-64-NEXT: lock cmpxchgl %ecx, (%rdi)
+; CHECK-64-NEXT: jne .LBB2_1
+; CHECK-64-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-64-NEXT: retq
+ %result = atomicrmw usub_cond ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_cond_sub_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_cond_sub_i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movq (%rdi), %rax
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB3_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movq %rax, %rcx
-; CHECK-NEXT: subq %rsi, %rcx
-; CHECK-NEXT: cmovbq %rsi, %rcx
-; CHECK-NEXT: lock cmpxchgq %rcx, (%rdi)
-; CHECK-NEXT: jne .LBB3_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: retq
- %result = atomicrmw cond_sub ptr %ptr, i64 %val seq_cst
+define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
+; CHECK-32-LABEL: atomicrmw_usub_cond_i64:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: pushl %ebp
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: pushl %ebx
+; CHECK-32-NEXT: .cfi_def_cfa_offset 12
+; CHECK-32-NEXT: pushl %edi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-32-NEXT: pushl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 20
+; CHECK-32-NEXT: .cfi_offset %esi, -20
+; CHECK-32-NEXT: .cfi_offset %edi, -16
+; CHECK-32-NEXT: .cfi_offset %ebx, -12
+; CHECK-32-NEXT: .cfi_offset %ebp, -8
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; CHECK-32-NEXT: movl (%ebp), %eax
+; CHECK-32-NEXT: movl 4(%ebp), %edx
+; CHECK-32-NEXT: jmp .LBB3_1
+; CHECK-32-NEXT: .p2align 4, 0x90
+; CHECK-32-NEXT: .LBB3_3: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB3_1 Depth=1
+; CHECK-32-NEXT: lock cmpxchg8b (%ebp)
+; CHECK-32-NEXT: je .LBB3_4
+; CHECK-32-NEXT: .LBB3_1: # %atomicrmw.start
+; CHECK-32-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-32-NEXT: movl %eax, %ebx
+; CHECK-32-NEXT: subl %edi, %ebx
+; CHECK-32-NEXT: movl %edx, %ecx
+; CHECK-32-NEXT: sbbl %esi, %ecx
+; CHECK-32-NEXT: jae .LBB3_3
+; CHECK-32-NEXT: # %bb.2: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB3_1 Depth=1
+; CHECK-32-NEXT: movl %esi, %ecx
+; CHECK-32-NEXT: movl %edi, %ebx
+; CHECK-32-NEXT: jmp .LBB3_3
+; CHECK-32-NEXT: .LBB3_4: # %atomicrmw.end
+; CHECK-32-NEXT: popl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-32-NEXT: popl %edi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 12
+; CHECK-32-NEXT: popl %ebx
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: popl %ebp
+; CHECK-32-NEXT: .cfi_def_cfa_offset 4
+; CHECK-32-NEXT: retl
+;
+; CHECK-64-LABEL: atomicrmw_usub_cond_i64:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: movq (%rdi), %rax
+; CHECK-64-NEXT: .p2align 4, 0x90
+; CHECK-64-NEXT: .LBB3_1: # %atomicrmw.start
+; CHECK-64-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-64-NEXT: movq %rax, %rcx
+; CHECK-64-NEXT: subq %rsi, %rcx
+; CHECK-64-NEXT: cmovbq %rsi, %rcx
+; CHECK-64-NEXT: lock cmpxchgq %rcx, (%rdi)
+; CHECK-64-NEXT: jne .LBB3_1
+; CHECK-64-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-64-NEXT: retq
+ %result = atomicrmw usub_cond ptr %ptr, i64 %val seq_cst
ret i64 %result
}
-define i8 @atomicrmw_sub_clamp_i8(ptr %ptr, i8 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movzbl (%rdi), %eax
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB4_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %eax, %edx
-; CHECK-NEXT: subb %sil, %dl
-; CHECK-NEXT: movzbl %dl, %edx
-; CHECK-NEXT: cmovbl %ecx, %edx
-; CHECK-NEXT: lock cmpxchgb %dl, (%rdi)
-; CHECK-NEXT: jne .LBB4_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: retq
- %result = atomicrmw sub_clamp ptr %ptr, i8 %val seq_cst
+define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
+; CHECK-32-LABEL: atomicrmw_usub_sat_i8:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: pushl %ebx
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: .cfi_offset %ebx, -8
+; CHECK-32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-32-NEXT: movzbl (%edx), %eax
+; CHECK-32-NEXT: jmp .LBB4_1
+; CHECK-32-NEXT: .p2align 4, 0x90
+; CHECK-32-NEXT: .LBB4_3: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB4_1 Depth=1
+; CHECK-32-NEXT: lock cmpxchgb %bl, (%edx)
+; CHECK-32-NEXT: je .LBB4_4
+; CHECK-32-NEXT: .LBB4_1: # %atomicrmw.start
+; CHECK-32-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-32-NEXT: movl %eax, %ebx
+; CHECK-32-NEXT: subb %cl, %bl
+; CHECK-32-NEXT: jae .LBB4_3
+; CHECK-32-NEXT: # %bb.2: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB4_1 Depth=1
+; CHECK-32-NEXT: xorl %ebx, %ebx
+; CHECK-32-NEXT: jmp .LBB4_3
+; CHECK-32-NEXT: .LBB4_4: # %atomicrmw.end
+; CHECK-32-NEXT: popl %ebx
+; CHECK-32-NEXT: .cfi_def_cfa_offset 4
+; CHECK-32-NEXT: retl
+;
+; CHECK-64-LABEL: atomicrmw_usub_sat_i8:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: movzbl (%rdi), %eax
+; CHECK-64-NEXT: xorl %ecx, %ecx
+; CHECK-64-NEXT: .p2align 4, 0x90
+; CHECK-64-NEXT: .LBB4_1: # %atomicrmw.start
+; CHECK-64-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-64-NEXT: movl %eax, %edx
+; CHECK-64-NEXT: subb %sil, %dl
+; CHECK-64-NEXT: movzbl %dl, %edx
+; CHECK-64-NEXT: cmovbl %ecx, %edx
+; CHECK-64-NEXT: lock cmpxchgb %dl, (%rdi)
+; CHECK-64-NEXT: jne .LBB4_1
+; CHECK-64-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-64-NEXT: retq
+ %result = atomicrmw usub_sat ptr %ptr, i8 %val seq_cst
ret i8 %result
}
-define i16 @atomicrmw_sub_clamp_i16(ptr %ptr, i16 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movzwl (%rdi), %eax
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB5_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %eax, %edx
-; CHECK-NEXT: subw %si, %dx
-; CHECK-NEXT: cmovbl %ecx, %edx
-; CHECK-NEXT: lock cmpxchgw %dx, (%rdi)
-; CHECK-NEXT: jne .LBB5_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: retq
- %result = atomicrmw sub_clamp ptr %ptr, i16 %val seq_cst
+define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
+; CHECK-32-LABEL: atomicrmw_usub_sat_i16:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: pushl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: .cfi_offset %esi, -8
+; CHECK-32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-32-NEXT: movzwl (%edx), %eax
+; CHECK-32-NEXT: jmp .LBB5_1
+; CHECK-32-NEXT: .p2align 4, 0x90
+; CHECK-32-NEXT: .LBB5_3: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB5_1 Depth=1
+; CHECK-32-NEXT: lock cmpxchgw %si, (%edx)
+; CHECK-32-NEXT: je .LBB5_4
+; CHECK-32-NEXT: .LBB5_1: # %atomicrmw.start
+; CHECK-32-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-32-NEXT: movl %eax, %esi
+; CHECK-32-NEXT: subw %cx, %si
+; CHECK-32-NEXT: jae .LBB5_3
+; CHECK-32-NEXT: # %bb.2: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB5_1 Depth=1
+; CHECK-32-NEXT: xorl %esi, %esi
+; CHECK-32-NEXT: jmp .LBB5_3
+; CHECK-32-NEXT: .LBB5_4: # %atomicrmw.end
+; CHECK-32-NEXT: popl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 4
+; CHECK-32-NEXT: retl
+;
+; CHECK-64-LABEL: atomicrmw_usub_sat_i16:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: movzwl (%rdi), %eax
+; CHECK-64-NEXT: xorl %ecx, %ecx
+; CHECK-64-NEXT: .p2align 4, 0x90
+; CHECK-64-NEXT: .LBB5_1: # %atomicrmw.start
+; CHECK-64-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-64-NEXT: movl %eax, %edx
+; CHECK-64-NEXT: subw %si, %dx
+; CHECK-64-NEXT: cmovbl %ecx, %edx
+; CHECK-64-NEXT: lock cmpxchgw %dx, (%rdi)
+; CHECK-64-NEXT: jne .LBB5_1
+; CHECK-64-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-64-NEXT: retq
+ %result = atomicrmw usub_sat ptr %ptr, i16 %val seq_cst
ret i16 %result
}
-define i32 @atomicrmw_sub_clamp_i32(ptr %ptr, i32 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl (%rdi), %eax
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB6_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %eax, %edx
-; CHECK-NEXT: subl %esi, %edx
-; CHECK-NEXT: cmovbl %ecx, %edx
-; CHECK-NEXT: lock cmpxchgl %edx, (%rdi)
-; CHECK-NEXT: jne .LBB6_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: retq
- %result = atomicrmw sub_clamp ptr %ptr, i32 %val seq_cst
+define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
+; CHECK-32-LABEL: atomicrmw_usub_sat_i32:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: pushl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: .cfi_offset %esi, -8
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-32-NEXT: movl (%edx), %eax
+; CHECK-32-NEXT: jmp .LBB6_1
+; CHECK-32-NEXT: .p2align 4, 0x90
+; CHECK-32-NEXT: .LBB6_3: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB6_1 Depth=1
+; CHECK-32-NEXT: lock cmpxchgl %esi, (%edx)
+; CHECK-32-NEXT: je .LBB6_4
+; CHECK-32-NEXT: .LBB6_1: # %atomicrmw.start
+; CHECK-32-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-32-NEXT: movl %eax, %esi
+; CHECK-32-NEXT: subl %ecx, %esi
+; CHECK-32-NEXT: jae .LBB6_3
+; CHECK-32-NEXT: # %bb.2: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB6_1 Depth=1
+; CHECK-32-NEXT: xorl %esi, %esi
+; CHECK-32-NEXT: jmp .LBB6_3
+; CHECK-32-NEXT: .LBB6_4: # %atomicrmw.end
+; CHECK-32-NEXT: popl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 4
+; CHECK-32-NEXT: retl
+;
+; CHECK-64-LABEL: atomicrmw_usub_sat_i32:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: movl (%rdi), %eax
+; CHECK-64-NEXT: xorl %ecx, %ecx
+; CHECK-64-NEXT: .p2align 4, 0x90
+; CHECK-64-NEXT: .LBB6_1: # %atomicrmw.start
+; CHECK-64-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-64-NEXT: movl %eax, %edx
+; CHECK-64-NEXT: subl %esi, %edx
+; CHECK-64-NEXT: cmovbl %ecx, %edx
+; CHECK-64-NEXT: lock cmpxchgl %edx, (%rdi)
+; CHECK-64-NEXT: jne .LBB6_1
+; CHECK-64-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-64-NEXT: retq
+ %result = atomicrmw usub_sat ptr %ptr, i32 %val seq_cst
ret i32 %result
}
-define i64 @atomicrmw_sub_clamp_i64(ptr %ptr, i64 %val) {
-; CHECK-LABEL: atomicrmw_sub_clamp_i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movq (%rdi), %rax
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB7_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movq %rax, %rdx
-; CHECK-NEXT: subq %rsi, %rdx
-; CHECK-NEXT: cmovbq %rcx, %rdx
-; CHECK-NEXT: lock cmpxchgq %rdx, (%rdi)
-; CHECK-NEXT: jne .LBB7_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: retq
- %result = atomicrmw sub_clamp ptr %ptr, i64 %val seq_cst
+define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
+; CHECK-32-LABEL: atomicrmw_usub_sat_i64:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: pushl %ebp
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: pushl %ebx
+; CHECK-32-NEXT: .cfi_def_cfa_offset 12
+; CHECK-32-NEXT: pushl %edi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-32-NEXT: pushl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 20
+; CHECK-32-NEXT: .cfi_offset %esi, -20
+; CHECK-32-NEXT: .cfi_offset %edi, -16
+; CHECK-32-NEXT: .cfi_offset %ebx, -12
+; CHECK-32-NEXT: .cfi_offset %ebp, -8
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; CHECK-32-NEXT: movl (%ebp), %eax
+; CHECK-32-NEXT: movl 4(%ebp), %edx
+; CHECK-32-NEXT: jmp .LBB7_1
+; CHECK-32-NEXT: .p2align 4, 0x90
+; CHECK-32-NEXT: .LBB7_3: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB7_1 Depth=1
+; CHECK-32-NEXT: lock cmpxchg8b (%ebp)
+; CHECK-32-NEXT: je .LBB7_4
+; CHECK-32-NEXT: .LBB7_1: # %atomicrmw.start
+; CHECK-32-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-32-NEXT: movl %eax, %ebx
+; CHECK-32-NEXT: subl %edi, %ebx
+; CHECK-32-NEXT: movl %edx, %ecx
+; CHECK-32-NEXT: sbbl %esi, %ecx
+; CHECK-32-NEXT: jae .LBB7_3
+; CHECK-32-NEXT: # %bb.2: # %atomicrmw.start
+; CHECK-32-NEXT: # in Loop: Header=BB7_1 Depth=1
+; CHECK-32-NEXT: xorl %ecx, %ecx
+; CHECK-32-NEXT: xorl %ebx, %ebx
+; CHECK-32-NEXT: jmp .LBB7_3
+; CHECK-32-NEXT: .LBB7_4: # %atomicrmw.end
+; CHECK-32-NEXT: popl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-32-NEXT: popl %edi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 12
+; CHECK-32-NEXT: popl %ebx
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: popl %ebp
+; CHECK-32-NEXT: .cfi_def_cfa_offset 4
+; CHECK-32-NEXT: retl
+;
+; CHECK-64-LABEL: atomicrmw_usub_sat_i64:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: movq (%rdi), %rax
+; CHECK-64-NEXT: xorl %ecx, %ecx
+; CHECK-64-NEXT: .p2align 4, 0x90
+; CHECK-64-NEXT: .LBB7_1: # %atomicrmw.start
+; CHECK-64-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-64-NEXT: movq %rax, %rdx
+; CHECK-64-NEXT: subq %rsi, %rdx
+; CHECK-64-NEXT: cmovbq %rcx, %rdx
+; CHECK-64-NEXT: lock cmpxchgq %rdx, (%rdi)
+; CHECK-64-NEXT: jne .LBB7_1
+; CHECK-64-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-64-NEXT: retq
+ %result = atomicrmw usub_sat ptr %ptr, i64 %val seq_cst
ret i64 %result
}
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
index 44a9790129510..2d2f0c4886117 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
@@ -1366,8 +1366,8 @@ define i16 @test_atomicrmw_add_i16_buffer_fat_agent_align4(ptr addrspace(7) %ptr
ret i16 %res
}
-define i16 @test_atomicrmw_cond_sub_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_cond_sub_i16_global_agent(
+define i16 @test_atomicrmw_usub_cond_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_global_agent(
; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -1397,12 +1397,12 @@ define i16 @test_atomicrmw_cond_sub_i16_global_agent(ptr addrspace(1) %ptr, i16
; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED3]]
;
- %res = atomicrmw cond_sub ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
ret i16 %res
}
-define i16 @test_atomicrmw_cond_sub_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_cond_sub_i16_global_agent_align4(
+define i16 @test_atomicrmw_usub_cond_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_global_agent_align4(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
@@ -1422,12 +1422,12 @@ define i16 @test_atomicrmw_cond_sub_i16_global_agent_align4(ptr addrspace(1) %pt
; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED1]]
;
- %res = atomicrmw cond_sub ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
ret i16 %res
}
-define i16 @test_atomicrmw_cond_sub_i16_local(ptr addrspace(3) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_cond_sub_i16_local(
+define i16 @test_atomicrmw_usub_cond_i16_local(ptr addrspace(3) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_local(
; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -1456,12 +1456,12 @@ define i16 @test_atomicrmw_cond_sub_i16_local(ptr addrspace(3) %ptr, i16 %value)
; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED3]]
;
- %res = atomicrmw cond_sub ptr addrspace(3) %ptr, i16 %value syncscope("agent") seq_cst
+ %res = atomicrmw usub_cond ptr addrspace(3) %ptr, i16 %value syncscope("agent") seq_cst
ret i16 %res
}
-define i16 @test_atomicrmw_cond_sub_i16_local_align4(ptr addrspace(3) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_cond_sub_i16_local_align4(
+define i16 @test_atomicrmw_usub_cond_i16_local_align4(ptr addrspace(3) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_local_align4(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(3) [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
@@ -1481,12 +1481,12 @@ define i16 @test_atomicrmw_cond_sub_i16_local_align4(ptr addrspace(3) %ptr, i16
; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED1]]
;
- %res = atomicrmw cond_sub ptr addrspace(3) %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ %res = atomicrmw usub_cond ptr addrspace(3) %ptr, i16 %value syncscope("agent") seq_cst, align 4
ret i16 %res
}
-define i16 @test_atomicrmw_cond_sub_i16_flat_agent(ptr %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_cond_sub_i16_flat_agent(
+define i16 @test_atomicrmw_usub_cond_i16_flat_agent(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_flat_agent(
; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -1516,12 +1516,12 @@ define i16 @test_atomicrmw_cond_sub_i16_flat_agent(ptr %ptr, i16 %value) {
; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED3]]
;
- %res = atomicrmw cond_sub ptr %ptr, i16 %value syncscope("agent") seq_cst
+ %res = atomicrmw usub_cond ptr %ptr, i16 %value syncscope("agent") seq_cst
ret i16 %res
}
-define i16 @test_atomicrmw_cond_sub_i16_flat_agent_align4(ptr %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_cond_sub_i16_flat_agent_align4(
+define i16 @test_atomicrmw_usub_cond_i16_flat_agent_align4(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i16_flat_agent_align4(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
@@ -1541,12 +1541,12 @@ define i16 @test_atomicrmw_cond_sub_i16_flat_agent_align4(ptr %ptr, i16 %value)
; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED1]]
;
- %res = atomicrmw cond_sub ptr %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ %res = atomicrmw usub_cond ptr %ptr, i16 %value syncscope("agent") seq_cst, align 4
ret i16 %res
}
-define i16 @test_atomicrmw_sub_clamp_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_sub_clamp_i16_global_agent(
+define i16 @test_atomicrmw_usub_sat_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_global_agent(
; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -1576,12 +1576,12 @@ define i16 @test_atomicrmw_sub_clamp_i16_global_agent(ptr addrspace(1) %ptr, i16
; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED3]]
;
- %res = atomicrmw sub_clamp ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
ret i16 %res
}
-define i16 @test_atomicrmw_sub_clamp_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_sub_clamp_i16_global_agent_align4(
+define i16 @test_atomicrmw_usub_sat_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_global_agent_align4(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
@@ -1601,12 +1601,12 @@ define i16 @test_atomicrmw_sub_clamp_i16_global_agent_align4(ptr addrspace(1) %p
; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED1]]
;
- %res = atomicrmw sub_clamp ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
ret i16 %res
}
-define i16 @test_atomicrmw_sub_clamp_i16_local(ptr addrspace(3) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_sub_clamp_i16_local(
+define i16 @test_atomicrmw_usub_sat_i16_local(ptr addrspace(3) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_local(
; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -1635,12 +1635,12 @@ define i16 @test_atomicrmw_sub_clamp_i16_local(ptr addrspace(3) %ptr, i16 %value
; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED3]]
;
- %res = atomicrmw sub_clamp ptr addrspace(3) %ptr, i16 %value seq_cst
+ %res = atomicrmw usub_sat ptr addrspace(3) %ptr, i16 %value seq_cst
ret i16 %res
}
-define i16 @test_atomicrmw_sub_clamp_i16_local_align4(ptr addrspace(3) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_sub_clamp_i16_local_align4(
+define i16 @test_atomicrmw_usub_sat_i16_local_align4(ptr addrspace(3) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_local_align4(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(3) [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
@@ -1660,12 +1660,12 @@ define i16 @test_atomicrmw_sub_clamp_i16_local_align4(ptr addrspace(3) %ptr, i16
; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED1]]
;
- %res = atomicrmw sub_clamp ptr addrspace(3) %ptr, i16 %value seq_cst, align 4
+ %res = atomicrmw usub_sat ptr addrspace(3) %ptr, i16 %value seq_cst, align 4
ret i16 %res
}
-define i16 @test_atomicrmw_sub_clamp_i16_flat_agent(ptr %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_sub_clamp_i16_flat_agent(
+define i16 @test_atomicrmw_usub_sat_i16_flat_agent(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_flat_agent(
; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -1695,12 +1695,12 @@ define i16 @test_atomicrmw_sub_clamp_i16_flat_agent(ptr %ptr, i16 %value) {
; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED3]]
;
- %res = atomicrmw sub_clamp ptr %ptr, i16 %value syncscope("agent") seq_cst
+ %res = atomicrmw usub_sat ptr %ptr, i16 %value syncscope("agent") seq_cst
ret i16 %res
}
-define i16 @test_atomicrmw_sub_clamp_i16_flat_agent_align4(ptr %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_sub_clamp_i16_flat_agent_align4(
+define i16 @test_atomicrmw_usub_sat_i16_flat_agent_align4(ptr %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i16_flat_agent_align4(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
@@ -1720,7 +1720,7 @@ define i16 @test_atomicrmw_sub_clamp_i16_flat_agent_align4(ptr %ptr, i16 %value)
; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED1]]
;
- %res = atomicrmw sub_clamp ptr %ptr, i16 %value syncscope("agent") seq_cst, align 4
+ %res = atomicrmw usub_sat ptr %ptr, i16 %value syncscope("agent") seq_cst, align 4
ret i16 %res
}
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
index f5158e43861bc..6d0540b46b200 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
@@ -1713,8 +1713,8 @@ define i8 @test_atomicrmw_add_i8_buffer_fat_agent_align4(ptr addrspace(7) %ptr,
ret i8 %res
}
-define i8 @test_atomicrmw_cond_sub_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
-; GCN-LABEL: @test_atomicrmw_cond_sub_i8_global_agent(
+define i8 @test_atomicrmw_usub_cond_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_cond_i8_global_agent(
; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -1744,7 +1744,7 @@ define i8 @test_atomicrmw_cond_sub_i8_global_agent(ptr addrspace(1) %ptr, i8 %va
; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; GCN-NEXT: ret i8 [[EXTRACTED3]]
;
-; R600-LABEL: @test_atomicrmw_cond_sub_i8_global_agent(
+; R600-LABEL: @test_atomicrmw_usub_cond_i8_global_agent(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -1773,12 +1773,12 @@ define i8 @test_atomicrmw_cond_sub_i8_global_agent(ptr addrspace(1) %ptr, i8 %va
; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; R600-NEXT: ret i8 [[EXTRACTED3]]
;
- %res = atomicrmw cond_sub ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
ret i8 %res
}
-define i8 @test_atomicrmw_cond_sub_i8_global_agent_align2(ptr addrspace(1) %ptr, i8 %value) {
-; GCN-LABEL: @test_atomicrmw_cond_sub_i8_global_agent_align2(
+define i8 @test_atomicrmw_usub_cond_i8_global_agent_align2(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_cond_i8_global_agent_align2(
; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -1808,7 +1808,7 @@ define i8 @test_atomicrmw_cond_sub_i8_global_agent_align2(ptr addrspace(1) %ptr,
; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; GCN-NEXT: ret i8 [[EXTRACTED3]]
;
-; R600-LABEL: @test_atomicrmw_cond_sub_i8_global_agent_align2(
+; R600-LABEL: @test_atomicrmw_usub_cond_i8_global_agent_align2(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -1837,12 +1837,12 @@ define i8 @test_atomicrmw_cond_sub_i8_global_agent_align2(ptr addrspace(1) %ptr,
; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; R600-NEXT: ret i8 [[EXTRACTED3]]
;
- %res = atomicrmw cond_sub ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 2
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 2
ret i8 %res
}
-define i8 @test_atomicrmw_cond_sub_i8_global_agent_align4(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_cond_sub_i8_global_agent_align4(
+define i8 @test_atomicrmw_usub_cond_i8_global_agent_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_global_agent_align4(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
@@ -1862,12 +1862,12 @@ define i8 @test_atomicrmw_cond_sub_i8_global_agent_align4(ptr addrspace(1) %ptr,
; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
; CHECK-NEXT: ret i8 [[EXTRACTED1]]
;
- %res = atomicrmw cond_sub ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 4
+ %res = atomicrmw usub_cond ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 4
ret i8 %res
}
-define i8 @test_atomicrmw_cond_sub_i8_local(ptr addrspace(3) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_cond_sub_i8_local(
+define i8 @test_atomicrmw_usub_cond_i8_local(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_local(
; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -1896,12 +1896,12 @@ define i8 @test_atomicrmw_cond_sub_i8_local(ptr addrspace(3) %ptr, i8 %value) {
; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; CHECK-NEXT: ret i8 [[EXTRACTED3]]
;
- %res = atomicrmw cond_sub ptr addrspace(3) %ptr, i8 %value seq_cst
+ %res = atomicrmw usub_cond ptr addrspace(3) %ptr, i8 %value seq_cst
ret i8 %res
}
-define i8 @test_atomicrmw_cond_sub_i8_local_align2(ptr addrspace(3) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_cond_sub_i8_local_align2(
+define i8 @test_atomicrmw_usub_cond_i8_local_align2(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_local_align2(
; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -1930,12 +1930,12 @@ define i8 @test_atomicrmw_cond_sub_i8_local_align2(ptr addrspace(3) %ptr, i8 %va
; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; CHECK-NEXT: ret i8 [[EXTRACTED3]]
;
- %res = atomicrmw cond_sub ptr addrspace(3) %ptr, i8 %value seq_cst, align 2
+ %res = atomicrmw usub_cond ptr addrspace(3) %ptr, i8 %value seq_cst, align 2
ret i8 %res
}
-define i8 @test_atomicrmw_cond_sub_i8_local_align4(ptr addrspace(3) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_cond_sub_i8_local_align4(
+define i8 @test_atomicrmw_usub_cond_i8_local_align4(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_local_align4(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(3) [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
@@ -1955,12 +1955,12 @@ define i8 @test_atomicrmw_cond_sub_i8_local_align4(ptr addrspace(3) %ptr, i8 %va
; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
; CHECK-NEXT: ret i8 [[EXTRACTED1]]
;
- %res = atomicrmw cond_sub ptr addrspace(3) %ptr, i8 %value seq_cst, align 4
+ %res = atomicrmw usub_cond ptr addrspace(3) %ptr, i8 %value seq_cst, align 4
ret i8 %res
}
-define i8 @test_atomicrmw_cond_sub_i8_flat_agent(ptr %ptr, i8 %value) {
-; GCN-LABEL: @test_atomicrmw_cond_sub_i8_flat_agent(
+define i8 @test_atomicrmw_usub_cond_i8_flat_agent(ptr %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_cond_i8_flat_agent(
; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -1990,7 +1990,7 @@ define i8 @test_atomicrmw_cond_sub_i8_flat_agent(ptr %ptr, i8 %value) {
; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; GCN-NEXT: ret i8 [[EXTRACTED3]]
;
-; R600-LABEL: @test_atomicrmw_cond_sub_i8_flat_agent(
+; R600-LABEL: @test_atomicrmw_usub_cond_i8_flat_agent(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[PTR:%.*]], i32 -4)
; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i32
; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -2019,12 +2019,12 @@ define i8 @test_atomicrmw_cond_sub_i8_flat_agent(ptr %ptr, i8 %value) {
; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; R600-NEXT: ret i8 [[EXTRACTED3]]
;
- %res = atomicrmw cond_sub ptr %ptr, i8 %value syncscope("agent") seq_cst
+ %res = atomicrmw usub_cond ptr %ptr, i8 %value syncscope("agent") seq_cst
ret i8 %res
}
-define i8 @test_atomicrmw_cond_sub_i8_flat_agent_align2(ptr %ptr, i8 %value) {
-; GCN-LABEL: @test_atomicrmw_cond_sub_i8_flat_agent_align2(
+define i8 @test_atomicrmw_usub_cond_i8_flat_agent_align2(ptr %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_cond_i8_flat_agent_align2(
; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -2054,7 +2054,7 @@ define i8 @test_atomicrmw_cond_sub_i8_flat_agent_align2(ptr %ptr, i8 %value) {
; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; GCN-NEXT: ret i8 [[EXTRACTED3]]
;
-; R600-LABEL: @test_atomicrmw_cond_sub_i8_flat_agent_align2(
+; R600-LABEL: @test_atomicrmw_usub_cond_i8_flat_agent_align2(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[PTR:%.*]], i32 -4)
; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i32
; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -2083,12 +2083,12 @@ define i8 @test_atomicrmw_cond_sub_i8_flat_agent_align2(ptr %ptr, i8 %value) {
; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; R600-NEXT: ret i8 [[EXTRACTED3]]
;
- %res = atomicrmw cond_sub ptr %ptr, i8 %value syncscope("agent") seq_cst, align 2
+ %res = atomicrmw usub_cond ptr %ptr, i8 %value syncscope("agent") seq_cst, align 2
ret i8 %res
}
-define i8 @test_atomicrmw_cond_sub_i8_flat_agent_align4(ptr %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_cond_sub_i8_flat_agent_align4(
+define i8 @test_atomicrmw_usub_cond_i8_flat_agent_align4(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_cond_i8_flat_agent_align4(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
@@ -2108,12 +2108,12 @@ define i8 @test_atomicrmw_cond_sub_i8_flat_agent_align4(ptr %ptr, i8 %value) {
; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
; CHECK-NEXT: ret i8 [[EXTRACTED1]]
;
- %res = atomicrmw cond_sub ptr %ptr, i8 %value syncscope("agent") seq_cst, align 4
+ %res = atomicrmw usub_cond ptr %ptr, i8 %value syncscope("agent") seq_cst, align 4
ret i8 %res
}
-define i8 @test_atomicrmw_sub_clamp_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
-; GCN-LABEL: @test_atomicrmw_sub_clamp_i8_global_agent(
+define i8 @test_atomicrmw_usub_sat_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_sat_i8_global_agent(
; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -2143,7 +2143,7 @@ define i8 @test_atomicrmw_sub_clamp_i8_global_agent(ptr addrspace(1) %ptr, i8 %v
; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; GCN-NEXT: ret i8 [[EXTRACTED3]]
;
-; R600-LABEL: @test_atomicrmw_sub_clamp_i8_global_agent(
+; R600-LABEL: @test_atomicrmw_usub_sat_i8_global_agent(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -2172,12 +2172,12 @@ define i8 @test_atomicrmw_sub_clamp_i8_global_agent(ptr addrspace(1) %ptr, i8 %v
; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; R600-NEXT: ret i8 [[EXTRACTED3]]
;
- %res = atomicrmw sub_clamp ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst
ret i8 %res
}
-define i8 @test_atomicrmw_sub_clamp_i8_global_agent_align2(ptr addrspace(1) %ptr, i8 %value) {
-; GCN-LABEL: @test_atomicrmw_sub_clamp_i8_global_agent_align2(
+define i8 @test_atomicrmw_usub_sat_i8_global_agent_align2(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_sat_i8_global_agent_align2(
; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -2207,7 +2207,7 @@ define i8 @test_atomicrmw_sub_clamp_i8_global_agent_align2(ptr addrspace(1) %ptr
; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; GCN-NEXT: ret i8 [[EXTRACTED3]]
;
-; R600-LABEL: @test_atomicrmw_sub_clamp_i8_global_agent_align2(
+; R600-LABEL: @test_atomicrmw_usub_sat_i8_global_agent_align2(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -2236,12 +2236,12 @@ define i8 @test_atomicrmw_sub_clamp_i8_global_agent_align2(ptr addrspace(1) %ptr
; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; R600-NEXT: ret i8 [[EXTRACTED3]]
;
- %res = atomicrmw sub_clamp ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 2
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 2
ret i8 %res
}
-define i8 @test_atomicrmw_sub_clamp_i8_global_agent_align4(ptr addrspace(1) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_sub_clamp_i8_global_agent_align4(
+define i8 @test_atomicrmw_usub_sat_i8_global_agent_align4(ptr addrspace(1) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_global_agent_align4(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
@@ -2261,12 +2261,12 @@ define i8 @test_atomicrmw_sub_clamp_i8_global_agent_align4(ptr addrspace(1) %ptr
; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
; CHECK-NEXT: ret i8 [[EXTRACTED1]]
;
- %res = atomicrmw sub_clamp ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 4
+ %res = atomicrmw usub_sat ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, align 4
ret i8 %res
}
-define i8 @test_atomicrmw_sub_clamp_i8_local(ptr addrspace(3) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_sub_clamp_i8_local(
+define i8 @test_atomicrmw_usub_sat_i8_local(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_local(
; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -2295,12 +2295,12 @@ define i8 @test_atomicrmw_sub_clamp_i8_local(ptr addrspace(3) %ptr, i8 %value) {
; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; CHECK-NEXT: ret i8 [[EXTRACTED3]]
;
- %res = atomicrmw sub_clamp ptr addrspace(3) %ptr, i8 %value seq_cst
+ %res = atomicrmw usub_sat ptr addrspace(3) %ptr, i8 %value seq_cst
ret i8 %res
}
-define i8 @test_atomicrmw_sub_clamp_i8_local_align2(ptr addrspace(3) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_sub_clamp_i8_local_align2(
+define i8 @test_atomicrmw_usub_sat_i8_local_align2(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_local_align2(
; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; CHECK-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -2329,12 +2329,12 @@ define i8 @test_atomicrmw_sub_clamp_i8_local_align2(ptr addrspace(3) %ptr, i8 %v
; CHECK-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; CHECK-NEXT: ret i8 [[EXTRACTED3]]
;
- %res = atomicrmw sub_clamp ptr addrspace(3) %ptr, i8 %value seq_cst, align 2
+ %res = atomicrmw usub_sat ptr addrspace(3) %ptr, i8 %value seq_cst, align 2
ret i8 %res
}
-define i8 @test_atomicrmw_sub_clamp_i8_local_align4(ptr addrspace(3) %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_sub_clamp_i8_local_align4(
+define i8 @test_atomicrmw_usub_sat_i8_local_align4(ptr addrspace(3) %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_local_align4(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(3) [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
@@ -2354,12 +2354,12 @@ define i8 @test_atomicrmw_sub_clamp_i8_local_align4(ptr addrspace(3) %ptr, i8 %v
; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
; CHECK-NEXT: ret i8 [[EXTRACTED1]]
;
- %res = atomicrmw sub_clamp ptr addrspace(3) %ptr, i8 %value seq_cst, align 4
+ %res = atomicrmw usub_sat ptr addrspace(3) %ptr, i8 %value seq_cst, align 4
ret i8 %res
}
-define i8 @test_atomicrmw_sub_clamp_i8_flat_agent(ptr %ptr, i8 %value) {
-; GCN-LABEL: @test_atomicrmw_sub_clamp_i8_flat_agent(
+define i8 @test_atomicrmw_usub_sat_i8_flat_agent(ptr %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_sat_i8_flat_agent(
; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -2389,7 +2389,7 @@ define i8 @test_atomicrmw_sub_clamp_i8_flat_agent(ptr %ptr, i8 %value) {
; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; GCN-NEXT: ret i8 [[EXTRACTED3]]
;
-; R600-LABEL: @test_atomicrmw_sub_clamp_i8_flat_agent(
+; R600-LABEL: @test_atomicrmw_usub_sat_i8_flat_agent(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[PTR:%.*]], i32 -4)
; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i32
; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -2418,12 +2418,12 @@ define i8 @test_atomicrmw_sub_clamp_i8_flat_agent(ptr %ptr, i8 %value) {
; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; R600-NEXT: ret i8 [[EXTRACTED3]]
;
- %res = atomicrmw sub_clamp ptr %ptr, i8 %value syncscope("agent") seq_cst
+ %res = atomicrmw usub_sat ptr %ptr, i8 %value syncscope("agent") seq_cst
ret i8 %res
}
-define i8 @test_atomicrmw_sub_clamp_i8_flat_agent_align2(ptr %ptr, i8 %value) {
-; GCN-LABEL: @test_atomicrmw_sub_clamp_i8_flat_agent_align2(
+define i8 @test_atomicrmw_usub_sat_i8_flat_agent_align2(ptr %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_usub_sat_i8_flat_agent_align2(
; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i64
; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -2453,7 +2453,7 @@ define i8 @test_atomicrmw_sub_clamp_i8_flat_agent_align2(ptr %ptr, i8 %value) {
; GCN-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; GCN-NEXT: ret i8 [[EXTRACTED3]]
;
-; R600-LABEL: @test_atomicrmw_sub_clamp_i8_flat_agent_align2(
+; R600-LABEL: @test_atomicrmw_usub_sat_i8_flat_agent_align2(
; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[PTR:%.*]], i32 -4)
; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR]] to i32
; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
@@ -2482,12 +2482,12 @@ define i8 @test_atomicrmw_sub_clamp_i8_flat_agent_align2(ptr %ptr, i8 %value) {
; R600-NEXT: [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i8
; R600-NEXT: ret i8 [[EXTRACTED3]]
;
- %res = atomicrmw sub_clamp ptr %ptr, i8 %value syncscope("agent") seq_cst, align 2
+ %res = atomicrmw usub_sat ptr %ptr, i8 %value syncscope("agent") seq_cst, align 2
ret i8 %res
}
-define i8 @test_atomicrmw_sub_clamp_i8_flat_agent_align4(ptr %ptr, i8 %value) {
-; CHECK-LABEL: @test_atomicrmw_sub_clamp_i8_flat_agent_align4(
+define i8 @test_atomicrmw_usub_sat_i8_flat_agent_align4(ptr %ptr, i8 %value) {
+; CHECK-LABEL: @test_atomicrmw_usub_sat_i8_flat_agent_align4(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
@@ -2507,6 +2507,6 @@ define i8 @test_atomicrmw_sub_clamp_i8_flat_agent_align4(ptr %ptr, i8 %value) {
; CHECK-NEXT: [[EXTRACTED1:%.*]] = trunc i32 [[NEWLOADED]] to i8
; CHECK-NEXT: ret i8 [[EXTRACTED1]]
;
- %res = atomicrmw sub_clamp ptr %ptr, i8 %value syncscope("agent") seq_cst, align 4
+ %res = atomicrmw usub_sat ptr %ptr, i8 %value syncscope("agent") seq_cst, align 4
ret i8 %res
}
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
index b8dcc4f0d2939..025494222febc 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
@@ -106,10 +106,10 @@ def AtomicBinOpUIncWrap : LLVM_EnumAttrCase<"uinc_wrap",
"uinc_wrap", "UIncWrap", 15>;
def AtomicBinOpUDecWrap : LLVM_EnumAttrCase<"udec_wrap",
"udec_wrap", "UDecWrap", 16>;
-def AtomicBinOpCondSub : LLVM_EnumAttrCase<"cond_sub",
- "cond_sub", "CondSub", 17>;
-def AtomicBinOpSubClamp : LLVM_EnumAttrCase<"sub_clamp",
- "sub_clamp", "SubClamp", 18>;
+def AtomicBinOpUSubCond : LLVM_EnumAttrCase<"usub_cond",
+ "usub_cond", "USubCond", 17>;
+def AtomicBinOpUSubSat : LLVM_EnumAttrCase<"usub_sat",
+ "usub_sat", "USubSat", 18>;
// A sentinel value that has no MLIR counterpart.
def AtomicBadBinOp : LLVM_EnumAttrCase<"", "", "BAD_BINOP", 0>;
@@ -122,7 +122,7 @@ def AtomicBinOp : LLVM_EnumAttr<
AtomicBinOpNand, AtomicBinOpOr, AtomicBinOpXor, AtomicBinOpMax,
AtomicBinOpMin, AtomicBinOpUMax, AtomicBinOpUMin, AtomicBinOpFAdd,
AtomicBinOpFSub, AtomicBinOpFMax, AtomicBinOpFMin, AtomicBinOpUIncWrap,
- AtomicBinOpUDecWrap, AtomicBinOpCondSub, AtomicBinOpSubClamp],
+ AtomicBinOpUDecWrap, AtomicBinOpUSubCond, AtomicBinOpUSubSat],
[AtomicBadBinOp]> {
let cppNamespace = "::mlir::LLVM";
}
diff --git a/mlir/test/Target/LLVMIR/Import/instructions.ll b/mlir/test/Target/LLVMIR/Import/instructions.ll
index 01a86aea20768..c6bad2cc8bf35 100644
--- a/mlir/test/Target/LLVMIR/Import/instructions.ll
+++ b/mlir/test/Target/LLVMIR/Import/instructions.ll
@@ -440,10 +440,10 @@ define void @atomic_rmw(ptr %ptr1, i32 %val1, ptr %ptr2, float %val2) {
%16 = atomicrmw uinc_wrap ptr %ptr1, i32 %val1 acquire
; CHECK: llvm.atomicrmw udec_wrap %[[PTR1]], %[[VAL1]] acquire
%17 = atomicrmw udec_wrap ptr %ptr1, i32 %val1 acquire
- ; CHECK: llvm.atomicrmw cond_sub %[[PTR1]], %[[VAL1]] acquire
- %18 = atomicrmw cond_sub ptr %ptr1, i32 %val1 acquire
- ; CHECK: llvm.atomicrmw sub_clamp %[[PTR1]], %[[VAL1]] acquire
- %19 = atomicrmw sub_clamp ptr %ptr1, i32 %val1 acquire
+ ; CHECK: llvm.atomicrmw usub_cond %[[PTR1]], %[[VAL1]] acquire
+ %18 = atomicrmw usub_cond ptr %ptr1, i32 %val1 acquire
+ ; CHECK: llvm.atomicrmw usub_sat %[[PTR1]], %[[VAL1]] acquire
+ %19 = atomicrmw usub_sat ptr %ptr1, i32 %val1 acquire
; CHECK: llvm.atomicrmw volatile
; CHECK-SAME: syncscope("singlethread")
diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
index 3dbd4aab6ae47..cef365c94afc2 100644
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -1501,10 +1501,10 @@ llvm.func @atomicrmw(
%15 = llvm.atomicrmw uinc_wrap %i32_ptr, %i32 monotonic : !llvm.ptr, i32
// CHECK: atomicrmw udec_wrap ptr %{{.*}}, i32 %{{.*}} monotonic
%16 = llvm.atomicrmw udec_wrap %i32_ptr, %i32 monotonic : !llvm.ptr, i32
- // CHECK: atomicrmw cond_sub ptr %{{.*}}, i32 %{{.*}} monotonic
- %17 = llvm.atomicrmw cond_sub %i32_ptr, %i32 monotonic : !llvm.ptr, i32
- // CHECK: atomicrmw sub_clamp ptr %{{.*}}, i32 %{{.*}} monotonic
- %18 = llvm.atomicrmw sub_clamp %i32_ptr, %i32 monotonic : !llvm.ptr, i32
+ // CHECK: atomicrmw usub_cond ptr %{{.*}}, i32 %{{.*}} monotonic
+ %17 = llvm.atomicrmw usub_cond %i32_ptr, %i32 monotonic : !llvm.ptr, i32
+ // CHECK: atomicrmw usub_sat ptr %{{.*}}, i32 %{{.*}} monotonic
+ %18 = llvm.atomicrmw usub_sat %i32_ptr, %i32 monotonic : !llvm.ptr, i32
// CHECK: atomicrmw volatile
// CHECK-SAME: syncscope("singlethread")