[llvm] [RISCV][GlobalISel] Lower G_ATOMICRMW_SUB via G_ATOMICRMW_ADD (PR #155972)

Kane Wang via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 28 22:15:06 PDT 2025


https://github.com/ReVe1uv updated https://github.com/llvm/llvm-project/pull/155972

From 52ecdd48d6d5369042d3125ccdd8aa6eaca0cfad Mon Sep 17 00:00:00 2001
From: Kane Wang <kanewang95 at foxmail.com>
Date: Fri, 29 Aug 2025 11:39:33 +0800
Subject: [PATCH] [RISCV][GlobalISel] Lower G_ATOMICRMW_SUB via G_ATOMICRMW_ADD

RISC-V has no native atomic subtract instruction, so this patch lowers G_ATOMICRMW_SUB by negating the RHS value and performing an atomic add instead. The legalization rules in RISCVLegalizerInfo are updated accordingly, falling back to libcalls when StdExtA is not available, and intrinsic legalization is extended to cover riscv_masked_atomicrmw_sub.
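
As a rough illustration (a sketch only; register names, the s32 type and the
memory ordering are illustrative), the lowering rewrites the generic MIR along
these lines:

  Before legalization:
    %res:_(s32) = G_ATOMICRMW_SUB %ptr(p0), %val(s32) :: (load store monotonic (s32))

  After legalization:
    %zero:_(s32) = G_CONSTANT i32 0
    %neg:_(s32)  = G_SUB %zero, %val(s32)
    %res:_(s32)  = G_ATOMICRMW_ADD %ptr(p0), %neg(s32) :: (load store monotonic (s32))

When StdExtA is not available the operation is not lowered this way; it is
legalized to an __atomic_fetch_sub_N libcall instead, as the RV32I/RV64I check
lines in the new test show.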
---
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    |  12 +
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |   6 +
 .../RISCV/GlobalISel/atomicrmw-add-sub.ll     | 930 ++++++++++++++++++
 .../CodeGen/RISCV/GlobalISel/atomicrmw-add.ll | 299 ------
 .../instruction-select/atomicrmw-add-rv32.mir |  73 --
 .../instruction-select/atomicrmw-add-rv64.mir |  96 --
 .../atomicrmw-add-sub-rv32.mir                | 154 +++
 .../atomicrmw-add-sub-rv64.mir                | 204 ++++
 .../GlobalISel/legalizer-info-validation.mir  |   4 +-
 .../legalizer/legalize-atomicrmw-add-rv32.mir |  97 --
 .../legalizer/legalize-atomicrmw-add-rv64.mir | 128 ---
 .../legalize-atomicrmw-add-sub-rv32.mir       | 206 ++++
 .../legalize-atomicrmw-add-sub-rv64.mir       | 274 ++++++
 13 files changed, 1788 insertions(+), 695 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add-sub.ll
 delete mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add.ll
 delete mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv32.mir
 delete mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv64.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-sub-rv32.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-sub-rv64.mir
 delete mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv32.mir
 delete mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv64.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-sub-rv32.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-sub-rv64.mir

diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index a4353967698bf..0330c21b22699 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -4773,6 +4773,18 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) {
     return lowerVectorReduction(MI);
   case G_VAARG:
     return lowerVAArg(MI);
+  case G_ATOMICRMW_SUB: {
+    auto [Ret, RetLLT, Mem, MemLLT, Val, ValLLT] = MI.getFirst3RegLLTs();
+    MachineMemOperand *MMO = *MI.memoperands_begin();
+
+    auto VNeg = MIRBuilder.buildNeg(ValLLT, Val);
+    auto NewRMW =
+        MIRBuilder.buildAtomicRMW(G_ATOMICRMW_ADD, RetLLT, Mem, VNeg, *MMO);
+
+    MIRBuilder.buildCopy(Ret, NewRMW);
+    MI.eraseFromParent();
+    return Legalized;
+  }
   }
 }
 
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 9fd9639e3a1da..d908e5f482e2b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -700,6 +700,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
       .libcallFor(!ST.hasStdExtA(), {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
       .clampScalar(0, sXLen, sXLen);
 
+  getActionDefinitionsBuilder(G_ATOMICRMW_SUB)
+      .libcallFor(!ST.hasStdExtA(), {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
+      .clampScalar(0, sXLen, sXLen)
+      .lower();
+
   getLegacyLegalizerInfo().computeTables();
   verify(*ST.getInstrInfo());
 }
@@ -738,6 +743,7 @@ bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     return true;
   }
   case Intrinsic::riscv_masked_atomicrmw_add:
+  case Intrinsic::riscv_masked_atomicrmw_sub:
     return true;
   }
 }
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add-sub.ll b/llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add-sub.ll
new file mode 100644
index 0000000000000..21b2bbfc59241
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add-sub.ll
@@ -0,0 +1,930 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+a,+zabha -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32IA-ZABHA
+; RUN: llc -mtriple=riscv32 -mattr=+a -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32IA
+; RUN: llc -mtriple=riscv32 -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32I
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zabha -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64IA-ZABHA
+; RUN: llc -mtriple=riscv64 -mattr=+a -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64IA
+; RUN: llc -mtriple=riscv64 -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64I
+
+define i8 @atomicrmw_add_i8(ptr %ptr, i8 %rhs) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_add_i8:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    amoadd.b.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_add_i8:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    li a2, 255
+; RV32IA-NEXT:    andi a3, a0, -4
+; RV32IA-NEXT:    andi a0, a0, 3
+; RV32IA-NEXT:    zext.b a1, a1
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:    sll a1, a1, a0
+; RV32IA-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aqrl a4, (a3)
+; RV32IA-NEXT:    add a5, a4, a1
+; RV32IA-NEXT:    xor a5, a4, a5
+; RV32IA-NEXT:    and a5, a5, a2
+; RV32IA-NEXT:    xor a5, a4, a5
+; RV32IA-NEXT:    sc.w.rl a5, a5, (a3)
+; RV32IA-NEXT:    bnez a5, .LBB0_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a4, a0
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_add_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    call __atomic_fetch_add_1
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_add_i8:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    amoadd.b.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_add_i8:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    li a2, 255
+; RV64IA-NEXT:    andi a3, a0, -4
+; RV64IA-NEXT:    andi a0, a0, 3
+; RV64IA-NEXT:    zext.b a1, a1
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:    sllw a1, a1, a0
+; RV64IA-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aqrl a4, (a3)
+; RV64IA-NEXT:    add a5, a4, a1
+; RV64IA-NEXT:    xor a5, a4, a5
+; RV64IA-NEXT:    and a5, a5, a2
+; RV64IA-NEXT:    xor a5, a4, a5
+; RV64IA-NEXT:    sc.w.rl a5, a5, (a3)
+; RV64IA-NEXT:    bnez a5, .LBB0_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a4, a0
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_add_1
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %res = atomicrmw add ptr %ptr, i8 %rhs seq_cst
+  ret i8 %res
+}
+
+define i16 @atomicrmw_add_i16(ptr %ptr, i16 %rhs) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_add_i16:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    amoadd.h.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_add_i16:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    lui a2, 16
+; RV32IA-NEXT:    andi a3, a0, -4
+; RV32IA-NEXT:    andi a0, a0, 3
+; RV32IA-NEXT:    addi a2, a2, -1
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    sll a4, a2, a0
+; RV32IA-NEXT:    and a1, a1, a2
+; RV32IA-NEXT:    sll a1, a1, a0
+; RV32IA-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aqrl a2, (a3)
+; RV32IA-NEXT:    add a5, a2, a1
+; RV32IA-NEXT:    xor a5, a2, a5
+; RV32IA-NEXT:    and a5, a5, a4
+; RV32IA-NEXT:    xor a5, a2, a5
+; RV32IA-NEXT:    sc.w.rl a5, a5, (a3)
+; RV32IA-NEXT:    bnez a5, .LBB1_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a2, a0
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_add_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    call __atomic_fetch_add_2
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_add_i16:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    amoadd.h.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_add_i16:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    lui a2, 16
+; RV64IA-NEXT:    andi a3, a0, -4
+; RV64IA-NEXT:    andi a0, a0, 3
+; RV64IA-NEXT:    addi a2, a2, -1
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    sllw a4, a2, a0
+; RV64IA-NEXT:    and a1, a1, a2
+; RV64IA-NEXT:    sllw a1, a1, a0
+; RV64IA-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aqrl a2, (a3)
+; RV64IA-NEXT:    add a5, a2, a1
+; RV64IA-NEXT:    xor a5, a2, a5
+; RV64IA-NEXT:    and a5, a5, a4
+; RV64IA-NEXT:    xor a5, a2, a5
+; RV64IA-NEXT:    sc.w.rl a5, a5, (a3)
+; RV64IA-NEXT:    bnez a5, .LBB1_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a2, a0
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_add_2
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %res = atomicrmw add ptr %ptr, i16 %rhs seq_cst
+  ret i16 %res
+}
+
+define i32 @atomicrmw_add_i32(ptr %ptr, i32 %rhs) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_add_i32:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_add_i32:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_add_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    call __atomic_fetch_add_4
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_add_i32:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_add_i32:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_add_4
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %res = atomicrmw add ptr %ptr, i32 %rhs seq_cst
+  ret i32 %res
+}
+
+define i64 @atomicrmw_add_i64(ptr %ptr, i64 %rhs) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_add_i64:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    addi sp, sp, -16
+; RV32IA-ZABHA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-ZABHA-NEXT:    li a3, 5
+; RV32IA-ZABHA-NEXT:    call __atomic_fetch_add_8
+; RV32IA-ZABHA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-ZABHA-NEXT:    addi sp, sp, 16
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_add_i64:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    addi sp, sp, -16
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    li a3, 5
+; RV32IA-NEXT:    call __atomic_fetch_add_8
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    addi sp, sp, 16
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_add_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    call __atomic_fetch_add_8
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_add_i64:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_add_i64:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_add_8
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %res = atomicrmw add ptr %ptr, i64 %rhs seq_cst
+  ret i64 %res
+}
+
+define i8 @atomicrmw_sub_i8(ptr %ptr, i8 %rhs) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_sub_i8:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    neg a1, a1
+; RV32IA-ZABHA-NEXT:    amoadd.b.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_sub_i8:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    li a2, 255
+; RV32IA-NEXT:    andi a3, a0, -4
+; RV32IA-NEXT:    andi a0, a0, 3
+; RV32IA-NEXT:    zext.b a1, a1
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:    sll a1, a1, a0
+; RV32IA-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aqrl a4, (a3)
+; RV32IA-NEXT:    sub a5, a4, a1
+; RV32IA-NEXT:    xor a5, a4, a5
+; RV32IA-NEXT:    and a5, a5, a2
+; RV32IA-NEXT:    xor a5, a4, a5
+; RV32IA-NEXT:    sc.w.rl a5, a5, (a3)
+; RV32IA-NEXT:    bnez a5, .LBB4_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a4, a0
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_sub_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    call __atomic_fetch_sub_1
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_sub_i8:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    neg a1, a1
+; RV64IA-ZABHA-NEXT:    amoadd.b.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i8:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    li a2, 255
+; RV64IA-NEXT:    andi a3, a0, -4
+; RV64IA-NEXT:    andi a0, a0, 3
+; RV64IA-NEXT:    zext.b a1, a1
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:    sllw a1, a1, a0
+; RV64IA-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aqrl a4, (a3)
+; RV64IA-NEXT:    sub a5, a4, a1
+; RV64IA-NEXT:    xor a5, a4, a5
+; RV64IA-NEXT:    and a5, a5, a2
+; RV64IA-NEXT:    xor a5, a4, a5
+; RV64IA-NEXT:    sc.w.rl a5, a5, (a3)
+; RV64IA-NEXT:    bnez a5, .LBB4_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a4, a0
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_1
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %res = atomicrmw sub ptr %ptr, i8 %rhs seq_cst
+  ret i8 %res
+}
+
+define i16 @atomicrmw_sub_i16(ptr %ptr, i16 %rhs) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_sub_i16:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    neg a1, a1
+; RV32IA-ZABHA-NEXT:    amoadd.h.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_sub_i16:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    lui a2, 16
+; RV32IA-NEXT:    andi a3, a0, -4
+; RV32IA-NEXT:    andi a0, a0, 3
+; RV32IA-NEXT:    addi a2, a2, -1
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    sll a4, a2, a0
+; RV32IA-NEXT:    and a1, a1, a2
+; RV32IA-NEXT:    sll a1, a1, a0
+; RV32IA-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aqrl a2, (a3)
+; RV32IA-NEXT:    sub a5, a2, a1
+; RV32IA-NEXT:    xor a5, a2, a5
+; RV32IA-NEXT:    and a5, a5, a4
+; RV32IA-NEXT:    xor a5, a2, a5
+; RV32IA-NEXT:    sc.w.rl a5, a5, (a3)
+; RV32IA-NEXT:    bnez a5, .LBB5_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a2, a0
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_sub_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    call __atomic_fetch_sub_2
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_sub_i16:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    neg a1, a1
+; RV64IA-ZABHA-NEXT:    amoadd.h.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i16:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    lui a2, 16
+; RV64IA-NEXT:    andi a3, a0, -4
+; RV64IA-NEXT:    andi a0, a0, 3
+; RV64IA-NEXT:    addi a2, a2, -1
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    sllw a4, a2, a0
+; RV64IA-NEXT:    and a1, a1, a2
+; RV64IA-NEXT:    sllw a1, a1, a0
+; RV64IA-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aqrl a2, (a3)
+; RV64IA-NEXT:    sub a5, a2, a1
+; RV64IA-NEXT:    xor a5, a2, a5
+; RV64IA-NEXT:    and a5, a5, a4
+; RV64IA-NEXT:    xor a5, a2, a5
+; RV64IA-NEXT:    sc.w.rl a5, a5, (a3)
+; RV64IA-NEXT:    bnez a5, .LBB5_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a2, a0
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_2
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %res = atomicrmw sub ptr %ptr, i16 %rhs seq_cst
+  ret i16 %res
+}
+
+define i32 @atomicrmw_sub_i32(ptr %ptr, i32 %rhs) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_sub_i32:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    neg a1, a1
+; RV32IA-ZABHA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_sub_i32:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    neg a1, a1
+; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_sub_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    call __atomic_fetch_sub_4
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_sub_i32:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    neg a1, a1
+; RV64IA-ZABHA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i32:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    neg a1, a1
+; RV64IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_4
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %res = atomicrmw sub ptr %ptr, i32 %rhs seq_cst
+  ret i32 %res
+}
+
+define i64 @atomicrmw_sub_i64(ptr %ptr, i64 %rhs) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_sub_i64:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    addi sp, sp, -16
+; RV32IA-ZABHA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-ZABHA-NEXT:    li a3, 5
+; RV32IA-ZABHA-NEXT:    call __atomic_fetch_sub_8
+; RV32IA-ZABHA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-ZABHA-NEXT:    addi sp, sp, 16
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_sub_i64:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    addi sp, sp, -16
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    li a3, 5
+; RV32IA-NEXT:    call __atomic_fetch_sub_8
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    addi sp, sp, 16
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_sub_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    call __atomic_fetch_sub_8
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_sub_i64:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    neg a1, a1
+; RV64IA-ZABHA-NEXT:    amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i64:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    neg a1, a1
+; RV64IA-NEXT:    amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_8
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %res = atomicrmw sub ptr %ptr, i64 %rhs seq_cst
+  ret i64 %res
+}
+
+define i16 @atomicrmw_sub_i16_constant(ptr %a) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_sub_i16_constant:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    li a1, -1
+; RV32IA-ZABHA-NEXT:    amoadd.h.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_sub_i16_constant:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    lui a1, 16
+; RV32IA-NEXT:    li a2, 1
+; RV32IA-NEXT:    andi a3, a0, -4
+; RV32IA-NEXT:    andi a0, a0, 3
+; RV32IA-NEXT:    addi a1, a1, -1
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    sll a1, a1, a0
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aqrl a4, (a3)
+; RV32IA-NEXT:    sub a5, a4, a2
+; RV32IA-NEXT:    xor a5, a4, a5
+; RV32IA-NEXT:    and a5, a5, a1
+; RV32IA-NEXT:    xor a5, a4, a5
+; RV32IA-NEXT:    sc.w.rl a5, a5, (a3)
+; RV32IA-NEXT:    bnez a5, .LBB8_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a4, a0
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_sub_i16_constant:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a1, 1
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    call __atomic_fetch_sub_2
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_sub_i16_constant:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    li a1, 1
+; RV64IA-ZABHA-NEXT:    neg a1, a1
+; RV64IA-ZABHA-NEXT:    amoadd.h.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i16_constant:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    lui a1, 16
+; RV64IA-NEXT:    li a2, 1
+; RV64IA-NEXT:    andi a3, a0, -4
+; RV64IA-NEXT:    andi a0, a0, 3
+; RV64IA-NEXT:    addi a1, a1, -1
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    sllw a1, a1, a0
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aqrl a4, (a3)
+; RV64IA-NEXT:    sub a5, a4, a2
+; RV64IA-NEXT:    xor a5, a4, a5
+; RV64IA-NEXT:    and a5, a5, a1
+; RV64IA-NEXT:    xor a5, a4, a5
+; RV64IA-NEXT:    sc.w.rl a5, a5, (a3)
+; RV64IA-NEXT:    bnez a5, .LBB8_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a4, a0
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i16_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_2
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i16 1 seq_cst
+  ret i16 %1
+}
+
+define i8 @atomicrmw_sub_i8_constant(ptr %a) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_sub_i8_constant:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    li a1, -1
+; RV32IA-ZABHA-NEXT:    amoadd.b.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_sub_i8_constant:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    li a1, 255
+; RV32IA-NEXT:    li a2, 1
+; RV32IA-NEXT:    andi a3, a0, -4
+; RV32IA-NEXT:    andi a0, a0, 3
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    sll a1, a1, a0
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aqrl a4, (a3)
+; RV32IA-NEXT:    sub a5, a4, a2
+; RV32IA-NEXT:    xor a5, a4, a5
+; RV32IA-NEXT:    and a5, a5, a1
+; RV32IA-NEXT:    xor a5, a4, a5
+; RV32IA-NEXT:    sc.w.rl a5, a5, (a3)
+; RV32IA-NEXT:    bnez a5, .LBB9_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a4, a0
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_sub_i8_constant:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a1, 1
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    call __atomic_fetch_sub_1
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_sub_i8_constant:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    li a1, 1
+; RV64IA-ZABHA-NEXT:    neg a1, a1
+; RV64IA-ZABHA-NEXT:    amoadd.b.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i8_constant:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    li a1, 255
+; RV64IA-NEXT:    li a2, 1
+; RV64IA-NEXT:    andi a3, a0, -4
+; RV64IA-NEXT:    andi a0, a0, 3
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    sllw a1, a1, a0
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aqrl a4, (a3)
+; RV64IA-NEXT:    sub a5, a4, a2
+; RV64IA-NEXT:    xor a5, a4, a5
+; RV64IA-NEXT:    and a5, a5, a1
+; RV64IA-NEXT:    xor a5, a4, a5
+; RV64IA-NEXT:    sc.w.rl a5, a5, (a3)
+; RV64IA-NEXT:    bnez a5, .LBB9_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a4, a0
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i8_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_1
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i8 1 seq_cst
+  ret i8 %1
+}
+
+define i32 @atomicrmw_sub_i32_constant(ptr %a) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_sub_i32_constant:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    li a1, -1
+; RV32IA-ZABHA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_sub_i32_constant:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    li a1, -1
+; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_sub_i32_constant:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a1, 1
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    call __atomic_fetch_sub_4
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_sub_i32_constant:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    li a1, 1
+; RV64IA-ZABHA-NEXT:    neg a1, a1
+; RV64IA-ZABHA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i32_constant:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    li a1, 1
+; RV64IA-NEXT:    neg a1, a1
+; RV64IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i32_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_4
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i32 1 seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_sub_i64_constant(ptr %a) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_sub_i64_constant:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    addi sp, sp, -16
+; RV32IA-ZABHA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-ZABHA-NEXT:    li a1, 1
+; RV32IA-ZABHA-NEXT:    li a3, 5
+; RV32IA-ZABHA-NEXT:    li a2, 0
+; RV32IA-ZABHA-NEXT:    call __atomic_fetch_sub_8
+; RV32IA-ZABHA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-ZABHA-NEXT:    addi sp, sp, 16
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_sub_i64_constant:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    addi sp, sp, -16
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    li a1, 1
+; RV32IA-NEXT:    li a3, 5
+; RV32IA-NEXT:    li a2, 0
+; RV32IA-NEXT:    call __atomic_fetch_sub_8
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    addi sp, sp, 16
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_sub_i64_constant:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a1, 1
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    call __atomic_fetch_sub_8
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_sub_i64_constant:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    li a1, -1
+; RV64IA-ZABHA-NEXT:    amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i64_constant:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    li a1, -1
+; RV64IA-NEXT:    amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i64_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_8
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i64 1 seq_cst
+  ret i64 %1
+}
+
+define i32 @atomicrmw_sub_i32_neg(ptr %a, i32 %x, i32 %y) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_sub_i32_neg:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    sub a1, a1, a2
+; RV32IA-ZABHA-NEXT:    neg a1, a1
+; RV32IA-ZABHA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_sub_i32_neg:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    sub a1, a1, a2
+; RV32IA-NEXT:    neg a1, a1
+; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_sub_i32_neg:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    call __atomic_fetch_sub_4
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_sub_i32_neg:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    subw a1, a1, a2
+; RV64IA-ZABHA-NEXT:    neg a1, a1
+; RV64IA-ZABHA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i32_neg:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    subw a1, a1, a2
+; RV64IA-NEXT:    neg a1, a1
+; RV64IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i32_neg:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    subw a1, a1, a2
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_4
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %b = sub i32 %x, %y
+  %1 = atomicrmw sub ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_sub_i64_neg(ptr %a, i64 %x, i64 %y) nounwind {
+; RV32IA-ZABHA-LABEL: atomicrmw_sub_i64_neg:
+; RV32IA-ZABHA:       # %bb.0:
+; RV32IA-ZABHA-NEXT:    addi sp, sp, -16
+; RV32IA-ZABHA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-ZABHA-NEXT:    sub a5, a1, a3
+; RV32IA-ZABHA-NEXT:    sltu a1, a1, a3
+; RV32IA-ZABHA-NEXT:    sub a2, a2, a4
+; RV32IA-ZABHA-NEXT:    sub a2, a2, a1
+; RV32IA-ZABHA-NEXT:    li a3, 5
+; RV32IA-ZABHA-NEXT:    mv a1, a5
+; RV32IA-ZABHA-NEXT:    call __atomic_fetch_sub_8
+; RV32IA-ZABHA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-ZABHA-NEXT:    addi sp, sp, 16
+; RV32IA-ZABHA-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_sub_i64_neg:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    addi sp, sp, -16
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sub a5, a1, a3
+; RV32IA-NEXT:    sltu a1, a1, a3
+; RV32IA-NEXT:    sub a2, a2, a4
+; RV32IA-NEXT:    sub a2, a2, a1
+; RV32IA-NEXT:    li a3, 5
+; RV32IA-NEXT:    mv a1, a5
+; RV32IA-NEXT:    call __atomic_fetch_sub_8
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    addi sp, sp, 16
+; RV32IA-NEXT:    ret
+;
+; RV32I-LABEL: atomicrmw_sub_i64_neg:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sub a5, a1, a3
+; RV32I-NEXT:    sltu a1, a1, a3
+; RV32I-NEXT:    sub a2, a2, a4
+; RV32I-NEXT:    sub a2, a2, a1
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    mv a1, a5
+; RV32I-NEXT:    call __atomic_fetch_sub_8
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_sub_i64_neg:
+; RV64IA-ZABHA:       # %bb.0:
+; RV64IA-ZABHA-NEXT:    sub a1, a1, a2
+; RV64IA-ZABHA-NEXT:    neg a1, a1
+; RV64IA-ZABHA-NEXT:    amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i64_neg:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    sub a1, a1, a2
+; RV64IA-NEXT:    neg a1, a1
+; RV64IA-NEXT:    amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i64_neg:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sub a1, a1, a2
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_8
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %b = sub i64 %x, %y
+  %1 = atomicrmw sub ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add.ll b/llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add.ll
deleted file mode 100644
index 8bd105462842d..0000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add.ll
+++ /dev/null
@@ -1,299 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+a,+zabha -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32IA-ZABHA
-; RUN: llc -mtriple=riscv32 -mattr=+a -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32IA
-; RUN: llc -mtriple=riscv32 -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
-; RUN: llc -mtriple=riscv64 -mattr=+a,+zabha -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64IA-ZABHA
-; RUN: llc -mtriple=riscv64 -mattr=+a -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64IA
-; RUN: llc -mtriple=riscv64 -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
-
-define i8 @atomicrmw_add_i8(ptr %ptr, i8 %rhs) {
-; RV32IA-ZABHA-LABEL: atomicrmw_add_i8:
-; RV32IA-ZABHA:       # %bb.0:
-; RV32IA-ZABHA-NEXT:    amoadd.b.aqrl a0, a1, (a0)
-; RV32IA-ZABHA-NEXT:    ret
-;
-; RV32IA-LABEL: atomicrmw_add_i8:
-; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    li a2, 255
-; RV32IA-NEXT:    andi a3, a0, -4
-; RV32IA-NEXT:    andi a0, a0, 3
-; RV32IA-NEXT:    zext.b a1, a1
-; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    sll a2, a2, a0
-; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w.aqrl a4, (a3)
-; RV32IA-NEXT:    add a5, a4, a1
-; RV32IA-NEXT:    xor a5, a4, a5
-; RV32IA-NEXT:    and a5, a5, a2
-; RV32IA-NEXT:    xor a5, a4, a5
-; RV32IA-NEXT:    sc.w.rl a5, a5, (a3)
-; RV32IA-NEXT:    bnez a5, .LBB0_1
-; RV32IA-NEXT:  # %bb.2:
-; RV32IA-NEXT:    srl a0, a4, a0
-; RV32IA-NEXT:    ret
-;
-; RV32-LABEL: atomicrmw_add_i8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    li a2, 5
-; RV32-NEXT:    call __atomic_fetch_add_1
-; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
-; RV32-NEXT:    ret
-;
-; RV64IA-ZABHA-LABEL: atomicrmw_add_i8:
-; RV64IA-ZABHA:       # %bb.0:
-; RV64IA-ZABHA-NEXT:    amoadd.b.aqrl a0, a1, (a0)
-; RV64IA-ZABHA-NEXT:    ret
-;
-; RV64IA-LABEL: atomicrmw_add_i8:
-; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    li a2, 255
-; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    andi a0, a0, 3
-; RV64IA-NEXT:    zext.b a1, a1
-; RV64IA-NEXT:    slli a0, a0, 3
-; RV64IA-NEXT:    sllw a2, a2, a0
-; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w.aqrl a4, (a3)
-; RV64IA-NEXT:    add a5, a4, a1
-; RV64IA-NEXT:    xor a5, a4, a5
-; RV64IA-NEXT:    and a5, a5, a2
-; RV64IA-NEXT:    xor a5, a4, a5
-; RV64IA-NEXT:    sc.w.rl a5, a5, (a3)
-; RV64IA-NEXT:    bnez a5, .LBB0_1
-; RV64IA-NEXT:  # %bb.2:
-; RV64IA-NEXT:    srlw a0, a4, a0
-; RV64IA-NEXT:    ret
-;
-; RV64-LABEL: atomicrmw_add_i8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    .cfi_def_cfa_offset 16
-; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    li a2, 5
-; RV64-NEXT:    call __atomic_fetch_add_1
-; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    .cfi_restore ra
-; RV64-NEXT:    addi sp, sp, 16
-; RV64-NEXT:    .cfi_def_cfa_offset 0
-; RV64-NEXT:    ret
-  %res = atomicrmw add ptr %ptr, i8 %rhs seq_cst
-  ret i8 %res
-}
-
-define i16 @atomicrmw_add_i16(ptr %ptr, i16 %rhs) {
-; RV32IA-ZABHA-LABEL: atomicrmw_add_i16:
-; RV32IA-ZABHA:       # %bb.0:
-; RV32IA-ZABHA-NEXT:    amoadd.h.aqrl a0, a1, (a0)
-; RV32IA-ZABHA-NEXT:    ret
-;
-; RV32IA-LABEL: atomicrmw_add_i16:
-; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    lui a2, 16
-; RV32IA-NEXT:    andi a3, a0, -4
-; RV32IA-NEXT:    andi a0, a0, 3
-; RV32IA-NEXT:    addi a2, a2, -1
-; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    sll a4, a2, a0
-; RV32IA-NEXT:    and a1, a1, a2
-; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w.aqrl a2, (a3)
-; RV32IA-NEXT:    add a5, a2, a1
-; RV32IA-NEXT:    xor a5, a2, a5
-; RV32IA-NEXT:    and a5, a5, a4
-; RV32IA-NEXT:    xor a5, a2, a5
-; RV32IA-NEXT:    sc.w.rl a5, a5, (a3)
-; RV32IA-NEXT:    bnez a5, .LBB1_1
-; RV32IA-NEXT:  # %bb.2:
-; RV32IA-NEXT:    srl a0, a2, a0
-; RV32IA-NEXT:    ret
-;
-; RV32-LABEL: atomicrmw_add_i16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    li a2, 5
-; RV32-NEXT:    call __atomic_fetch_add_2
-; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
-; RV32-NEXT:    ret
-;
-; RV64IA-ZABHA-LABEL: atomicrmw_add_i16:
-; RV64IA-ZABHA:       # %bb.0:
-; RV64IA-ZABHA-NEXT:    amoadd.h.aqrl a0, a1, (a0)
-; RV64IA-ZABHA-NEXT:    ret
-;
-; RV64IA-LABEL: atomicrmw_add_i16:
-; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    lui a2, 16
-; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    andi a0, a0, 3
-; RV64IA-NEXT:    addi a2, a2, -1
-; RV64IA-NEXT:    slli a0, a0, 3
-; RV64IA-NEXT:    sllw a4, a2, a0
-; RV64IA-NEXT:    and a1, a1, a2
-; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w.aqrl a2, (a3)
-; RV64IA-NEXT:    add a5, a2, a1
-; RV64IA-NEXT:    xor a5, a2, a5
-; RV64IA-NEXT:    and a5, a5, a4
-; RV64IA-NEXT:    xor a5, a2, a5
-; RV64IA-NEXT:    sc.w.rl a5, a5, (a3)
-; RV64IA-NEXT:    bnez a5, .LBB1_1
-; RV64IA-NEXT:  # %bb.2:
-; RV64IA-NEXT:    srlw a0, a2, a0
-; RV64IA-NEXT:    ret
-;
-; RV64-LABEL: atomicrmw_add_i16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    .cfi_def_cfa_offset 16
-; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    li a2, 5
-; RV64-NEXT:    call __atomic_fetch_add_2
-; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    .cfi_restore ra
-; RV64-NEXT:    addi sp, sp, 16
-; RV64-NEXT:    .cfi_def_cfa_offset 0
-; RV64-NEXT:    ret
-  %res = atomicrmw add ptr %ptr, i16 %rhs seq_cst
-  ret i16 %res
-}
-
-define i32 @atomicrmw_add_i32(ptr %ptr, i32 %rhs) {
-; RV32IA-ZABHA-LABEL: atomicrmw_add_i32:
-; RV32IA-ZABHA:       # %bb.0:
-; RV32IA-ZABHA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
-; RV32IA-ZABHA-NEXT:    ret
-;
-; RV32IA-LABEL: atomicrmw_add_i32:
-; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
-; RV32IA-NEXT:    ret
-;
-; RV32-LABEL: atomicrmw_add_i32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    li a2, 5
-; RV32-NEXT:    call __atomic_fetch_add_4
-; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
-; RV32-NEXT:    ret
-;
-; RV64IA-ZABHA-LABEL: atomicrmw_add_i32:
-; RV64IA-ZABHA:       # %bb.0:
-; RV64IA-ZABHA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
-; RV64IA-ZABHA-NEXT:    ret
-;
-; RV64IA-LABEL: atomicrmw_add_i32:
-; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
-; RV64IA-NEXT:    ret
-;
-; RV64-LABEL: atomicrmw_add_i32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    .cfi_def_cfa_offset 16
-; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    li a2, 5
-; RV64-NEXT:    call __atomic_fetch_add_4
-; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    .cfi_restore ra
-; RV64-NEXT:    addi sp, sp, 16
-; RV64-NEXT:    .cfi_def_cfa_offset 0
-; RV64-NEXT:    ret
-  %res = atomicrmw add ptr %ptr, i32 %rhs seq_cst
-  ret i32 %res
-}
-
-define i64 @atomicrmw_add_i64(ptr %ptr, i64 %rhs) {
-; RV32IA-ZABHA-LABEL: atomicrmw_add_i64:
-; RV32IA-ZABHA:       # %bb.0:
-; RV32IA-ZABHA-NEXT:    addi sp, sp, -16
-; RV32IA-ZABHA-NEXT:    .cfi_def_cfa_offset 16
-; RV32IA-ZABHA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-ZABHA-NEXT:    .cfi_offset ra, -4
-; RV32IA-ZABHA-NEXT:    li a3, 5
-; RV32IA-ZABHA-NEXT:    call __atomic_fetch_add_8
-; RV32IA-ZABHA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IA-ZABHA-NEXT:    .cfi_restore ra
-; RV32IA-ZABHA-NEXT:    addi sp, sp, 16
-; RV32IA-ZABHA-NEXT:    .cfi_def_cfa_offset 0
-; RV32IA-ZABHA-NEXT:    ret
-;
-; RV32IA-LABEL: atomicrmw_add_i64:
-; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    .cfi_def_cfa_offset 16
-; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    .cfi_offset ra, -4
-; RV32IA-NEXT:    li a3, 5
-; RV32IA-NEXT:    call __atomic_fetch_add_8
-; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    .cfi_restore ra
-; RV32IA-NEXT:    addi sp, sp, 16
-; RV32IA-NEXT:    .cfi_def_cfa_offset 0
-; RV32IA-NEXT:    ret
-;
-; RV32-LABEL: atomicrmw_add_i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    li a3, 5
-; RV32-NEXT:    call __atomic_fetch_add_8
-; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
-; RV32-NEXT:    ret
-;
-; RV64IA-ZABHA-LABEL: atomicrmw_add_i64:
-; RV64IA-ZABHA:       # %bb.0:
-; RV64IA-ZABHA-NEXT:    amoadd.d.aqrl a0, a1, (a0)
-; RV64IA-ZABHA-NEXT:    ret
-;
-; RV64IA-LABEL: atomicrmw_add_i64:
-; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    amoadd.d.aqrl a0, a1, (a0)
-; RV64IA-NEXT:    ret
-;
-; RV64-LABEL: atomicrmw_add_i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    .cfi_def_cfa_offset 16
-; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    li a2, 5
-; RV64-NEXT:    call __atomic_fetch_add_8
-; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    .cfi_restore ra
-; RV64-NEXT:    addi sp, sp, 16
-; RV64-NEXT:    .cfi_def_cfa_offset 0
-; RV64-NEXT:    ret
-  %res = atomicrmw add ptr %ptr, i64 %rhs seq_cst
-  ret i64 %res
-}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv32.mir
deleted file mode 100644
index 07478686011d0..0000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv32.mir
+++ /dev/null
@@ -1,73 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=riscv32 -mattr=+a,+zabha -run-pass=instruction-select -verify-machineinstrs %s -o - \
-# RUN: | FileCheck %s
-
----
-name:            atomicrmw_add_i8_monotonic
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; CHECK-LABEL: name: atomicrmw_add_i8_monotonic
-    ; CHECK: liveins: $x10, $x11
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
-    ; CHECK-NEXT: [[AMOADD_B:%[0-9]+]]:gpr = AMOADD_B [[COPY]], [[COPY1]] :: (load store monotonic (s8))
-    ; CHECK-NEXT: $x10 = COPY [[AMOADD_B]]
-    ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(p0) = COPY $x10
-    %1:gprb(s32) = COPY $x11
-    %6:gprb(s32) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s8))
-    $x10 = COPY %6(s32)
-    PseudoRET implicit $x10
-...
----
-name:            atomicrmw_add_i16_monotonic
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; CHECK-LABEL: name: atomicrmw_add_i16_monotonic
-    ; CHECK: liveins: $x10, $x11
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
-    ; CHECK-NEXT: [[AMOADD_H:%[0-9]+]]:gpr = AMOADD_H [[COPY]], [[COPY1]] :: (load store monotonic (s16))
-    ; CHECK-NEXT: $x10 = COPY [[AMOADD_H]]
-    ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(p0) = COPY $x10
-    %1:gprb(s32) = COPY $x11
-    %6:gprb(s32) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s16))
-    $x10 = COPY %6(s32)
-    PseudoRET implicit $x10
-...
----
-name:            atomicrmw_add_i32_monotonic
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; CHECK-LABEL: name: atomicrmw_add_i32_monotonic
-    ; CHECK: liveins: $x10, $x11
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
-    ; CHECK-NEXT: [[AMOADD_W:%[0-9]+]]:gpr = AMOADD_W [[COPY]], [[COPY1]] :: (load store monotonic (s32))
-    ; CHECK-NEXT: $x10 = COPY [[AMOADD_W]]
-    ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(p0) = COPY $x10
-    %1:gprb(s32) = COPY $x11
-    %2:gprb(s32) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s32))
-    $x10 = COPY %2(s32)
-    PseudoRET implicit $x10
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv64.mir
deleted file mode 100644
index 2e54a42748aaa..0000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv64.mir
+++ /dev/null
@@ -1,96 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=riscv64 -mattr=+a,+zabha -run-pass=instruction-select -verify-machineinstrs %s -o - \
-# RUN: | FileCheck %s
-
----
-name:            atomicrmw_add_i8_monotonic
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; CHECK-LABEL: name: atomicrmw_add_i8_monotonic
-    ; CHECK: liveins: $x10, $x11
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
-    ; CHECK-NEXT: [[AMOADD_B:%[0-9]+]]:gpr = AMOADD_B [[COPY]], [[COPY1]] :: (load store monotonic (s8))
-    ; CHECK-NEXT: $x10 = COPY [[AMOADD_B]]
-    ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(p0) = COPY $x10
-    %1:gprb(s64) = COPY $x11
-    %6:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s8))
-    $x10 = COPY %6(s64)
-    PseudoRET implicit $x10
-...
----
-name:            atomicrmw_add_i16_monotonic
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; CHECK-LABEL: name: atomicrmw_add_i16_monotonic
-    ; CHECK: liveins: $x10, $x11
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
-    ; CHECK-NEXT: [[AMOADD_H:%[0-9]+]]:gpr = AMOADD_H [[COPY]], [[COPY1]] :: (load store monotonic (s16))
-    ; CHECK-NEXT: $x10 = COPY [[AMOADD_H]]
-    ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(p0) = COPY $x10
-    %1:gprb(s64) = COPY $x11
-    %6:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s16))
-    $x10 = COPY %6(s64)
-    PseudoRET implicit $x10
-...
----
-name:            atomicrmw_add_i32_monotonic
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; CHECK-LABEL: name: atomicrmw_add_i32_monotonic
-    ; CHECK: liveins: $x10, $x11
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
-    ; CHECK-NEXT: [[AMOADD_W:%[0-9]+]]:gpr = AMOADD_W [[COPY]], [[COPY1]] :: (load store monotonic (s32))
-    ; CHECK-NEXT: $x10 = COPY [[AMOADD_W]]
-    ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(p0) = COPY $x10
-    %1:gprb(s64) = COPY $x11
-    %6:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s32))
-    $x10 = COPY %6(s64)
-    PseudoRET implicit $x10
-...
----
-name:            atomicrmw_add_i64_monotonic
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; CHECK-LABEL: name: atomicrmw_add_i64_monotonic
-    ; CHECK: liveins: $x10, $x11
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
-    ; CHECK-NEXT: [[AMOADD_D:%[0-9]+]]:gpr = AMOADD_D [[COPY]], [[COPY1]] :: (load store monotonic (s64))
-    ; CHECK-NEXT: $x10 = COPY [[AMOADD_D]]
-    ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(p0) = COPY $x10
-    %1:gprb(s64) = COPY $x11
-    %2:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s64))
-    $x10 = COPY %2(s64)
-    PseudoRET implicit $x10
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-sub-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-sub-rv32.mir
new file mode 100644
index 0000000000000..737a766fa8a59
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-sub-rv32.mir
@@ -0,0 +1,154 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+a,+zabha -run-pass=instruction-select -verify-machineinstrs %s -o - \
+# RUN: | FileCheck %s
+
+---
+name:            atomicrmw_add_i8_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_add_i8_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[AMOADD_B:%[0-9]+]]:gpr = AMOADD_B [[COPY]], [[COPY1]] :: (load store monotonic (s8))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_B]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %6:gprb(s32) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s8))
+    $x10 = COPY %6(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_add_i16_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_add_i16_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[AMOADD_H:%[0-9]+]]:gpr = AMOADD_H [[COPY]], [[COPY1]] :: (load store monotonic (s16))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_H]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %6:gprb(s32) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s16))
+    $x10 = COPY %6(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_add_i32_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_add_i32_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[AMOADD_W:%[0-9]+]]:gpr = AMOADD_W [[COPY]], [[COPY1]] :: (load store monotonic (s32))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_W]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s32))
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i8_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_sub_i8_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY2]], [[COPY1]]
+    ; CHECK-NEXT: [[AMOADD_B:%[0-9]+]]:gpr = AMOADD_B [[COPY]], [[SUB]] :: (load store monotonic (s8))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_B]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %4:gprb(s32) = G_SUB %3(s32), %1(s32)
+    %6:gprb(s32) = G_ATOMICRMW_ADD %0(p0), %4 :: (load store monotonic (s8))
+    $x10 = COPY %6(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i16_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_sub_i16_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY2]], [[COPY1]]
+    ; CHECK-NEXT: [[AMOADD_H:%[0-9]+]]:gpr = AMOADD_H [[COPY]], [[SUB]] :: (load store monotonic (s16))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_H]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %4:gprb(s32) = G_SUB %3(s32), %1(s32)
+    %6:gprb(s32) = G_ATOMICRMW_ADD %0(p0), %4 :: (load store monotonic (s16))
+    $x10 = COPY %6(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i32_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_sub_i32_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY2]], [[COPY1]]
+    ; CHECK-NEXT: [[AMOADD_W:%[0-9]+]]:gpr = AMOADD_W [[COPY]], [[SUB]] :: (load store monotonic (s32))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_W]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %4:gprb(s32) = G_SUB %3(s32), %1(s32)
+    %6:gprb(s32) = G_ATOMICRMW_ADD %0(p0), %4 :: (load store monotonic (s32))
+    $x10 = COPY %6(s32)
+    PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-sub-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-sub-rv64.mir
new file mode 100644
index 0000000000000..288258a2138a7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-sub-rv64.mir
@@ -0,0 +1,204 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+a,+zabha -run-pass=instruction-select -verify-machineinstrs %s -o - \
+# RUN: | FileCheck %s
+
+---
+name:            atomicrmw_add_i8_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_add_i8_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[AMOADD_B:%[0-9]+]]:gpr = AMOADD_B [[COPY]], [[COPY1]] :: (load store monotonic (s8))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_B]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %6:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s8))
+    $x10 = COPY %6(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_add_i16_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_add_i16_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[AMOADD_H:%[0-9]+]]:gpr = AMOADD_H [[COPY]], [[COPY1]] :: (load store monotonic (s16))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_H]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %6:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s16))
+    $x10 = COPY %6(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_add_i32_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_add_i32_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[AMOADD_W:%[0-9]+]]:gpr = AMOADD_W [[COPY]], [[COPY1]] :: (load store monotonic (s32))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_W]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %6:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s32))
+    $x10 = COPY %6(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_add_i64_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_add_i64_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[AMOADD_D:%[0-9]+]]:gpr = AMOADD_D [[COPY]], [[COPY1]] :: (load store monotonic (s64))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_D]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s64))
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i8_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_sub_i8_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY2]], [[COPY1]]
+    ; CHECK-NEXT: [[AMOADD_B:%[0-9]+]]:gpr = AMOADD_B [[COPY]], [[SUB]] :: (load store monotonic (s8))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_B]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %3:gprb(s64) = G_CONSTANT i64 0
+    %4:gprb(s64) = G_SUB %3(s64), %1(s64)
+    %6:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %4 :: (load store monotonic (s8))
+    $x10 = COPY %6(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i16_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_sub_i16_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY2]], [[COPY1]]
+    ; CHECK-NEXT: [[AMOADD_H:%[0-9]+]]:gpr = AMOADD_H [[COPY]], [[SUB]] :: (load store monotonic (s16))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_H]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %3:gprb(s64) = G_CONSTANT i64 0
+    %4:gprb(s64) = G_SUB %3(s64), %1(s64)
+    %6:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %4 :: (load store monotonic (s16))
+    $x10 = COPY %6(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i32_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_sub_i32_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY2]], [[COPY1]]
+    ; CHECK-NEXT: [[AMOADD_W:%[0-9]+]]:gpr = AMOADD_W [[COPY]], [[SUB]] :: (load store monotonic (s32))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_W]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %3:gprb(s64) = G_CONSTANT i64 0
+    %4:gprb(s64) = G_SUB %3(s64), %1(s64)
+    %6:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %4 :: (load store monotonic (s32))
+    $x10 = COPY %6(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i64_monotonic
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: atomicrmw_sub_i64_monotonic
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY2]], [[COPY1]]
+    ; CHECK-NEXT: [[AMOADD_D:%[0-9]+]]:gpr = AMOADD_D [[COPY]], [[SUB]] :: (load store monotonic (s64))
+    ; CHECK-NEXT: $x10 = COPY [[AMOADD_D]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(p0) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %3:gprb(s64) = G_CONSTANT i64 0
+    %4:gprb(s64) = G_SUB %3(s64), %1(s64)
+    %6:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %4 :: (load store monotonic (s64))
+    $x10 = COPY %6(s64)
+    PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
index 9d68a6d72c486..8edc7e4f8b57c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
@@ -225,8 +225,8 @@
 # DEBUG-NEXT: .. the first uncovered type index: 2, OK
 # DEBUG-NEXT: .. the first uncovered imm index: 0, OK
 # DEBUG-NEXT: G_ATOMICRMW_SUB (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
-# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_ATOMICRMW_AND (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
 # DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
 # DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv32.mir
deleted file mode 100644
index 2589564608230..0000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv32.mir
+++ /dev/null
@@ -1,97 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=riscv32 -mattr=+a,+zabha -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s --check-prefixes=RV32IA-ZABHA
-# RUN: llc -mtriple=riscv32 -mattr=+a -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s --check-prefixes=RV32IA
-
----
-name:            atomicrmw_add_i8_monotonic
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32IA-ZABHA-LABEL: name: atomicrmw_add_i8_monotonic
-    ; RV32IA-ZABHA: liveins: $x10, $x11
-    ; RV32IA-ZABHA-NEXT: {{  $}}
-    ; RV32IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV32IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
-    ; RV32IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
-    ; RV32IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
-    ; RV32IA-ZABHA-NEXT: PseudoRET implicit $x10
-    ;
-    ; RV32IA-LABEL: name: atomicrmw_add_i8_monotonic
-    ; RV32IA: liveins: $x10, $x11
-    ; RV32IA-NEXT: {{  $}}
-    ; RV32IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV32IA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
-    ; RV32IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
-    ; RV32IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
-    ; RV32IA-NEXT: PseudoRET implicit $x10
-    %0:_(p0) = COPY $x10
-    %1:_(s32) = COPY $x11
-    %2:_(s8) = G_TRUNC %1(s32)
-    %3:_(s8) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s8))
-    %4:_(s32) = G_ANYEXT %3(s8)
-    $x10 = COPY %4(s32)
-    PseudoRET implicit $x10
-...
----
-name:            atomicrmw_add_i16_monotonic
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32IA-ZABHA-LABEL: name: atomicrmw_add_i16_monotonic
-    ; RV32IA-ZABHA: liveins: $x10, $x11
-    ; RV32IA-ZABHA-NEXT: {{  $}}
-    ; RV32IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV32IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
-    ; RV32IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
-    ; RV32IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
-    ; RV32IA-ZABHA-NEXT: PseudoRET implicit $x10
-    ;
-    ; RV32IA-LABEL: name: atomicrmw_add_i16_monotonic
-    ; RV32IA: liveins: $x10, $x11
-    ; RV32IA-NEXT: {{  $}}
-    ; RV32IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV32IA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
-    ; RV32IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
-    ; RV32IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
-    ; RV32IA-NEXT: PseudoRET implicit $x10
-    %0:_(p0) = COPY $x10
-    %1:_(s32) = COPY $x11
-    %2:_(s16) = G_TRUNC %1(s32)
-    %3:_(s16) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s16))
-    %4:_(s32) = G_ANYEXT %3(s16)
-    $x10 = COPY %4(s32)
-    PseudoRET implicit $x10
-...
----
-name:            atomicrmw_add_i32_monotonic
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32IA-ZABHA-LABEL: name: atomicrmw_add_i32_monotonic
-    ; RV32IA-ZABHA: liveins: $x10, $x11
-    ; RV32IA-ZABHA-NEXT: {{  $}}
-    ; RV32IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV32IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
-    ; RV32IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
-    ; RV32IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
-    ; RV32IA-ZABHA-NEXT: PseudoRET implicit $x10
-    ;
-    ; RV32IA-LABEL: name: atomicrmw_add_i32_monotonic
-    ; RV32IA: liveins: $x10, $x11
-    ; RV32IA-NEXT: {{  $}}
-    ; RV32IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV32IA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
-    ; RV32IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
-    ; RV32IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
-    ; RV32IA-NEXT: PseudoRET implicit $x10
-    %0:_(p0) = COPY $x10
-    %1:_(s32) = COPY $x11
-    %2:_(s32) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s32))
-    $x10 = COPY %2(s32)
-    PseudoRET implicit $x10
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv64.mir
deleted file mode 100644
index 719ba2c590f1a..0000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv64.mir
+++ /dev/null
@@ -1,128 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=riscv64 -mattr=+a,+zabha -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s --check-prefixes=RV64IA-ZABHA
-# RUN: llc -mtriple=riscv64 -mattr=+a -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s --check-prefixes=RV64IA
-
----
-name:            atomicrmw_add_i8_monotonic
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64IA-ZABHA-LABEL: name: atomicrmw_add_i8_monotonic
-    ; RV64IA-ZABHA: liveins: $x10, $x11
-    ; RV64IA-ZABHA-NEXT: {{  $}}
-    ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
-    ; RV64IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
-    ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
-    ;
-    ; RV64IA-LABEL: name: atomicrmw_add_i8_monotonic
-    ; RV64IA: liveins: $x10, $x11
-    ; RV64IA-NEXT: {{  $}}
-    ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
-    ; RV64IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
-    ; RV64IA-NEXT: PseudoRET implicit $x10
-    %0:_(p0) = COPY $x10
-    %1:_(s64) = COPY $x11
-    %2:_(s8) = G_TRUNC %1(s64)
-    %3:_(s8) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s8))
-    %4:_(s64) = G_ANYEXT %3(s8)
-    $x10 = COPY %4(s64)
-    PseudoRET implicit $x10
-...
----
-name:            atomicrmw_add_i16_monotonic
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64IA-ZABHA-LABEL: name: atomicrmw_add_i16_monotonic
-    ; RV64IA-ZABHA: liveins: $x10, $x11
-    ; RV64IA-ZABHA-NEXT: {{  $}}
-    ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
-    ; RV64IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
-    ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
-    ;
-    ; RV64IA-LABEL: name: atomicrmw_add_i16_monotonic
-    ; RV64IA: liveins: $x10, $x11
-    ; RV64IA-NEXT: {{  $}}
-    ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
-    ; RV64IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
-    ; RV64IA-NEXT: PseudoRET implicit $x10
-    %0:_(p0) = COPY $x10
-    %1:_(s64) = COPY $x11
-    %2:_(s16) = G_TRUNC %1(s64)
-    %3:_(s16) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s16))
-    %4:_(s64) = G_ANYEXT %3(s16)
-    $x10 = COPY %4(s64)
-    PseudoRET implicit $x10
-...
----
-name:            atomicrmw_add_i32_monotonic
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64IA-ZABHA-LABEL: name: atomicrmw_add_i32_monotonic
-    ; RV64IA-ZABHA: liveins: $x10, $x11
-    ; RV64IA-ZABHA-NEXT: {{  $}}
-    ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
-    ; RV64IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
-    ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
-    ;
-    ; RV64IA-LABEL: name: atomicrmw_add_i32_monotonic
-    ; RV64IA: liveins: $x10, $x11
-    ; RV64IA-NEXT: {{  $}}
-    ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
-    ; RV64IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
-    ; RV64IA-NEXT: PseudoRET implicit $x10
-    %0:_(p0) = COPY $x10
-    %1:_(s64) = COPY $x11
-    %2:_(s32) = G_TRUNC %1(s64)
-    %3:_(s32) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s32))
-    %4:_(s64) = G_ANYEXT %3(s32)
-    $x10 = COPY %4(s64)
-    PseudoRET implicit $x10
-...
----
-name:            atomicrmw_add_i64_monotonic
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64IA-ZABHA-LABEL: name: atomicrmw_add_i64_monotonic
-    ; RV64IA-ZABHA: liveins: $x10, $x11
-    ; RV64IA-ZABHA-NEXT: {{  $}}
-    ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s64))
-    ; RV64IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
-    ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
-    ;
-    ; RV64IA-LABEL: name: atomicrmw_add_i64_monotonic
-    ; RV64IA: liveins: $x10, $x11
-    ; RV64IA-NEXT: {{  $}}
-    ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s64))
-    ; RV64IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
-    ; RV64IA-NEXT: PseudoRET implicit $x10
-    %0:_(p0) = COPY $x10
-    %1:_(s64) = COPY $x11
-    %2:_(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s64))
-    $x10 = COPY %2(s64)
-    PseudoRET implicit $x10
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-sub-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-sub-rv32.mir
new file mode 100644
index 0000000000000..d6fce835dc209
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-sub-rv32.mir
@@ -0,0 +1,206 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+a,+zabha -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefixes=RV32IA-ZABHA
+# RUN: llc -mtriple=riscv32 -mattr=+a -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefixes=RV32IA
+
+---
+name:            atomicrmw_add_i8_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32IA-ZABHA-LABEL: name: atomicrmw_add_i8_monotonic
+    ; RV32IA-ZABHA: liveins: $x10, $x11
+    ; RV32IA-ZABHA-NEXT: {{  $}}
+    ; RV32IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV32IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
+    ; RV32IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+    ; RV32IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV32IA-LABEL: name: atomicrmw_add_i8_monotonic
+    ; RV32IA: liveins: $x10, $x11
+    ; RV32IA-NEXT: {{  $}}
+    ; RV32IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV32IA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
+    ; RV32IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+    ; RV32IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s8) = G_TRUNC %1(s32)
+    %3:_(s8) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s8))
+    %4:_(s32) = G_ANYEXT %3(s8)
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_add_i16_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32IA-ZABHA-LABEL: name: atomicrmw_add_i16_monotonic
+    ; RV32IA-ZABHA: liveins: $x10, $x11
+    ; RV32IA-ZABHA-NEXT: {{  $}}
+    ; RV32IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV32IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
+    ; RV32IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+    ; RV32IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV32IA-LABEL: name: atomicrmw_add_i16_monotonic
+    ; RV32IA: liveins: $x10, $x11
+    ; RV32IA-NEXT: {{  $}}
+    ; RV32IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV32IA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
+    ; RV32IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+    ; RV32IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s16) = G_TRUNC %1(s32)
+    %3:_(s16) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s16))
+    %4:_(s32) = G_ANYEXT %3(s16)
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_add_i32_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32IA-ZABHA-LABEL: name: atomicrmw_add_i32_monotonic
+    ; RV32IA-ZABHA: liveins: $x10, $x11
+    ; RV32IA-ZABHA-NEXT: {{  $}}
+    ; RV32IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV32IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
+    ; RV32IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+    ; RV32IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV32IA-LABEL: name: atomicrmw_add_i32_monotonic
+    ; RV32IA: liveins: $x10, $x11
+    ; RV32IA-NEXT: {{  $}}
+    ; RV32IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV32IA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
+    ; RV32IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+    ; RV32IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s32))
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i8_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32IA-ZABHA-LABEL: name: atomicrmw_sub_i8_monotonic
+    ; RV32IA-ZABHA: liveins: $x10, $x11
+    ; RV32IA-ZABHA-NEXT: {{  $}}
+    ; RV32IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV32IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IA-ZABHA-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV32IA-ZABHA-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+    ; RV32IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s8))
+    ; RV32IA-ZABHA-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ATOMICRMW_ADD]](s32)
+    ; RV32IA-ZABHA-NEXT: $x10 = COPY [[COPY2]](s32)
+    ; RV32IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV32IA-LABEL: name: atomicrmw_sub_i8_monotonic
+    ; RV32IA: liveins: $x10, $x11
+    ; RV32IA-NEXT: {{  $}}
+    ; RV32IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV32IA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IA-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV32IA-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+    ; RV32IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s8))
+    ; RV32IA-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ATOMICRMW_ADD]](s32)
+    ; RV32IA-NEXT: $x10 = COPY [[COPY2]](s32)
+    ; RV32IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s8) = G_TRUNC %1(s32)
+    %3:_(s8) = G_ATOMICRMW_SUB %0(p0), %2 :: (load store monotonic (s8))
+    %4:_(s32) = G_ANYEXT %3(s8)
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i16_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32IA-ZABHA-LABEL: name: atomicrmw_sub_i16_monotonic
+    ; RV32IA-ZABHA: liveins: $x10, $x11
+    ; RV32IA-ZABHA-NEXT: {{  $}}
+    ; RV32IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV32IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IA-ZABHA-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV32IA-ZABHA-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+    ; RV32IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s16))
+    ; RV32IA-ZABHA-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ATOMICRMW_ADD]](s32)
+    ; RV32IA-ZABHA-NEXT: $x10 = COPY [[COPY2]](s32)
+    ; RV32IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV32IA-LABEL: name: atomicrmw_sub_i16_monotonic
+    ; RV32IA: liveins: $x10, $x11
+    ; RV32IA-NEXT: {{  $}}
+    ; RV32IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV32IA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IA-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV32IA-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+    ; RV32IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s16))
+    ; RV32IA-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ATOMICRMW_ADD]](s32)
+    ; RV32IA-NEXT: $x10 = COPY [[COPY2]](s32)
+    ; RV32IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s16) = G_TRUNC %1(s32)
+    %3:_(s16) = G_ATOMICRMW_SUB %0(p0), %2 :: (load store monotonic (s16))
+    %4:_(s32) = G_ANYEXT %3(s16)
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i32_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32IA-ZABHA-LABEL: name: atomicrmw_sub_i32_monotonic
+    ; RV32IA-ZABHA: liveins: $x10, $x11
+    ; RV32IA-ZABHA-NEXT: {{  $}}
+    ; RV32IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV32IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IA-ZABHA-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV32IA-ZABHA-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+    ; RV32IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s32))
+    ; RV32IA-ZABHA-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ATOMICRMW_ADD]](s32)
+    ; RV32IA-ZABHA-NEXT: $x10 = COPY [[COPY2]](s32)
+    ; RV32IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV32IA-LABEL: name: atomicrmw_sub_i32_monotonic
+    ; RV32IA: liveins: $x10, $x11
+    ; RV32IA-NEXT: {{  $}}
+    ; RV32IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV32IA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IA-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV32IA-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+    ; RV32IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s32))
+    ; RV32IA-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ATOMICRMW_ADD]](s32)
+    ; RV32IA-NEXT: $x10 = COPY [[COPY2]](s32)
+    ; RV32IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = G_ATOMICRMW_SUB %0(p0), %1 :: (load store monotonic (s32))
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-sub-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-sub-rv64.mir
new file mode 100644
index 0000000000000..8bb01af49489b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-sub-rv64.mir
@@ -0,0 +1,274 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+a,+zabha -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefixes=RV64IA-ZABHA
+# RUN: llc -mtriple=riscv64 -mattr=+a -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefixes=RV64IA
+
+---
+name:            atomicrmw_add_i8_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64IA-ZABHA-LABEL: name: atomicrmw_add_i8_monotonic
+    ; RV64IA-ZABHA: liveins: $x10, $x11
+    ; RV64IA-ZABHA-NEXT: {{  $}}
+    ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
+    ; RV64IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV64IA-LABEL: name: atomicrmw_add_i8_monotonic
+    ; RV64IA: liveins: $x10, $x11
+    ; RV64IA-NEXT: {{  $}}
+    ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
+    ; RV64IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s8) = G_TRUNC %1(s64)
+    %3:_(s8) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s8))
+    %4:_(s64) = G_ANYEXT %3(s8)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_add_i16_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64IA-ZABHA-LABEL: name: atomicrmw_add_i16_monotonic
+    ; RV64IA-ZABHA: liveins: $x10, $x11
+    ; RV64IA-ZABHA-NEXT: {{  $}}
+    ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
+    ; RV64IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV64IA-LABEL: name: atomicrmw_add_i16_monotonic
+    ; RV64IA: liveins: $x10, $x11
+    ; RV64IA-NEXT: {{  $}}
+    ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
+    ; RV64IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s16) = G_TRUNC %1(s64)
+    %3:_(s16) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s16))
+    %4:_(s64) = G_ANYEXT %3(s16)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_add_i32_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64IA-ZABHA-LABEL: name: atomicrmw_add_i32_monotonic
+    ; RV64IA-ZABHA: liveins: $x10, $x11
+    ; RV64IA-ZABHA-NEXT: {{  $}}
+    ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
+    ; RV64IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV64IA-LABEL: name: atomicrmw_add_i32_monotonic
+    ; RV64IA: liveins: $x10, $x11
+    ; RV64IA-NEXT: {{  $}}
+    ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
+    ; RV64IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s32) = G_TRUNC %1(s64)
+    %3:_(s32) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s32))
+    %4:_(s64) = G_ANYEXT %3(s32)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_add_i64_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64IA-ZABHA-LABEL: name: atomicrmw_add_i64_monotonic
+    ; RV64IA-ZABHA: liveins: $x10, $x11
+    ; RV64IA-ZABHA-NEXT: {{  $}}
+    ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s64))
+    ; RV64IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV64IA-LABEL: name: atomicrmw_add_i64_monotonic
+    ; RV64IA: liveins: $x10, $x11
+    ; RV64IA-NEXT: {{  $}}
+    ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s64))
+    ; RV64IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s64))
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i8_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64IA-ZABHA-LABEL: name: atomicrmw_sub_i8_monotonic
+    ; RV64IA-ZABHA: liveins: $x10, $x11
+    ; RV64IA-ZABHA-NEXT: {{  $}}
+    ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-ZABHA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64IA-ZABHA-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s8))
+    ; RV64IA-ZABHA-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-ZABHA-NEXT: $x10 = COPY [[COPY2]](s64)
+    ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV64IA-LABEL: name: atomicrmw_sub_i8_monotonic
+    ; RV64IA: liveins: $x10, $x11
+    ; RV64IA-NEXT: {{  $}}
+    ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64IA-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s8))
+    ; RV64IA-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-NEXT: $x10 = COPY [[COPY2]](s64)
+    ; RV64IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s8) = G_TRUNC %1(s64)
+    %3:_(s8) = G_ATOMICRMW_SUB %0(p0), %2 :: (load store monotonic (s8))
+    %4:_(s64) = G_ANYEXT %3(s8)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i16_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64IA-ZABHA-LABEL: name: atomicrmw_sub_i16_monotonic
+    ; RV64IA-ZABHA: liveins: $x10, $x11
+    ; RV64IA-ZABHA-NEXT: {{  $}}
+    ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-ZABHA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64IA-ZABHA-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s16))
+    ; RV64IA-ZABHA-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-ZABHA-NEXT: $x10 = COPY [[COPY2]](s64)
+    ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV64IA-LABEL: name: atomicrmw_sub_i16_monotonic
+    ; RV64IA: liveins: $x10, $x11
+    ; RV64IA-NEXT: {{  $}}
+    ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64IA-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s16))
+    ; RV64IA-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-NEXT: $x10 = COPY [[COPY2]](s64)
+    ; RV64IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s16) = G_TRUNC %1(s64)
+    %3:_(s16) = G_ATOMICRMW_SUB %0(p0), %2 :: (load store monotonic (s16))
+    %4:_(s64) = G_ANYEXT %3(s16)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i32_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64IA-ZABHA-LABEL: name: atomicrmw_sub_i32_monotonic
+    ; RV64IA-ZABHA: liveins: $x10, $x11
+    ; RV64IA-ZABHA-NEXT: {{  $}}
+    ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-ZABHA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64IA-ZABHA-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s32))
+    ; RV64IA-ZABHA-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-ZABHA-NEXT: $x10 = COPY [[COPY2]](s64)
+    ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV64IA-LABEL: name: atomicrmw_sub_i32_monotonic
+    ; RV64IA: liveins: $x10, $x11
+    ; RV64IA-NEXT: {{  $}}
+    ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64IA-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s32))
+    ; RV64IA-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-NEXT: $x10 = COPY [[COPY2]](s64)
+    ; RV64IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s32) = G_TRUNC %1(s64)
+    %3:_(s32) = G_ATOMICRMW_SUB %0(p0), %2 :: (load store monotonic (s32))
+    %4:_(s64) = G_ANYEXT %3(s32)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            atomicrmw_sub_i64_monotonic
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64IA-ZABHA-LABEL: name: atomicrmw_sub_i64_monotonic
+    ; RV64IA-ZABHA: liveins: $x10, $x11
+    ; RV64IA-ZABHA-NEXT: {{  $}}
+    ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-ZABHA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64IA-ZABHA-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s64))
+    ; RV64IA-ZABHA-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-ZABHA-NEXT: $x10 = COPY [[COPY2]](s64)
+    ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
+    ;
+    ; RV64IA-LABEL: name: atomicrmw_sub_i64_monotonic
+    ; RV64IA: liveins: $x10, $x11
+    ; RV64IA-NEXT: {{  $}}
+    ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64IA-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[SUB]] :: (load store monotonic (s64))
+    ; RV64IA-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ATOMICRMW_ADD]](s64)
+    ; RV64IA-NEXT: $x10 = COPY [[COPY2]](s64)
+    ; RV64IA-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_ATOMICRMW_SUB %0(p0), %1 :: (load store monotonic (s64))
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...


