[llvm] [RISCV][GlobalISel] Legalize and select G_ATOMICRMW_ADD instruction (PR #153791)
Kane Wang via llvm-commits
llvm-commits at lists.llvm.org
Sat Aug 23 09:50:52 PDT 2025
https://github.com/ReVe1uv updated https://github.com/llvm/llvm-project/pull/153791
From 9bf77a3b11737aa9a690ae67029157e2a158cd03 Mon Sep 17 00:00:00 2001
From: Kane Wang <kanewang95 at foxmail.com>
Date: Fri, 15 Aug 2025 19:25:32 +0800
Subject: [PATCH] [RISCV][GlobalISel] Legalize and select G_ATOMICRMW_ADD
instruction
This patch adds legalization and instruction selection support for the G_ATOMICRMW_ADD opcode in the RISC-V GlobalISel backend. With the A extension, {sXLen, p0} operations are legal and select to the native AMO instructions; without it, they are lowered to the __atomic_fetch_add_* libcalls. Narrower scalar operands are widened to XLEN via clampScalar.
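
For illustration, a minimal IR case this enables (mirroring the i32 case in the atomicrmw-add.ll test added below; the lowerings named in the comments are taken from that test's CHECK lines):

    define i32 @fetch_add(ptr %p, i32 %v) {
      ; With +a this selects directly to amoadd.w.aqrl; without the A
      ; extension it becomes a libcall to __atomic_fetch_add_4.
      %old = atomicrmw add ptr %p, i32 %v seq_cst
      ret i32 %old
    }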
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 17 +
.../CodeGen/RISCV/GlobalISel/atomicrmw-add.ll | 299 ++++++++++++++++++
.../instruction-select/atomicrmw-add-rv32.mir | 73 +++++
.../instruction-select/atomicrmw-add-rv64.mir | 96 ++++++
.../GlobalISel/legalizer-info-validation.mir | 4 +-
.../legalizer/legalize-atomicrmw-add-rv32.mir | 121 +++++++
.../legalizer/legalize-atomicrmw-add-rv64.mir | 160 ++++++++++
7 files changed, 768 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add.ll
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv32.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv64.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv32.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv64.mir
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index e88f33d6859ec..8d522c2906b8c 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -26,6 +26,8 @@
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/Type.h"
using namespace llvm;
@@ -692,6 +694,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.customIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)));
+ getActionDefinitionsBuilder(G_ATOMICRMW_ADD)
+ .legalFor(ST.hasStdExtA(), {{sXLen, p0}})
+ .libcallFor(!ST.hasStdExtA(), {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
+ .clampScalar(0, sXLen, sXLen);
+
getLegacyLegalizerInfo().computeTables();
verify(*ST.getInstrInfo());
}
@@ -729,6 +736,16 @@ bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MI.eraseFromParent();
return true;
}
+ case Intrinsic::riscv_masked_atomicrmw_xchg:
+ case Intrinsic::riscv_masked_atomicrmw_add:
+ case Intrinsic::riscv_masked_atomicrmw_sub:
+ case Intrinsic::riscv_masked_atomicrmw_nand:
+ case Intrinsic::riscv_masked_atomicrmw_max:
+ case Intrinsic::riscv_masked_atomicrmw_min:
+ case Intrinsic::riscv_masked_atomicrmw_umax:
+ case Intrinsic::riscv_masked_atomicrmw_umin:
+ case Intrinsic::riscv_masked_cmpxchg:
+ return true;
}
}
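As background for the intrinsic cases above: when sub-word AMOs are unavailable (no Zabha), AtomicExpandPass rewrites i8/i16 atomicrmw operations into word-sized masked operations on the containing aligned word, and those reach the legalizer as the riscv_masked_* target intrinsics, so returning true accepts them as already legal. A hypothetical sketch of the expanded shape follows; the exact overloaded intrinsic name and signature are assumptions for illustration, not taken from this patch:

    ; Assumed convention: (aligned word ptr, pre-shifted value, mask,
    ; ordering), where 5 encodes seq_cst.
    declare i32 @llvm.riscv.masked.atomicrmw.add.i32(ptr, i32, i32, i32)

    define i32 @masked_add_word(ptr %aligned, i32 %val, i32 %mask) {
      %old = call i32 @llvm.riscv.masked.atomicrmw.add.i32(ptr %aligned, i32 %val, i32 %mask, i32 5)
      ret i32 %old
    }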
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add.ll b/llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add.ll
new file mode 100644
index 0000000000000..8bd105462842d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add.ll
@@ -0,0 +1,299 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+a,+zabha -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32IA-ZABHA
+; RUN: llc -mtriple=riscv32 -mattr=+a -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32IA
+; RUN: llc -mtriple=riscv32 -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zabha -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64IA-ZABHA
+; RUN: llc -mtriple=riscv64 -mattr=+a -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64IA
+; RUN: llc -mtriple=riscv64 -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
+
+define i8 @atomicrmw_add_i8(ptr %ptr, i8 %rhs) {
+; RV32IA-ZABHA-LABEL: atomicrmw_add_i8:
+; RV32IA-ZABHA: # %bb.0:
+; RV32IA-ZABHA-NEXT: amoadd.b.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT: ret
+;
+; RV32IA-LABEL: atomicrmw_add_i8:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: li a2, 255
+; RV32IA-NEXT: andi a3, a0, -4
+; RV32IA-NEXT: andi a0, a0, 3
+; RV32IA-NEXT: zext.b a1, a1
+; RV32IA-NEXT: slli a0, a0, 3
+; RV32IA-NEXT: sll a2, a2, a0
+; RV32IA-NEXT: sll a1, a1, a0
+; RV32IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT: lr.w.aqrl a4, (a3)
+; RV32IA-NEXT: add a5, a4, a1
+; RV32IA-NEXT: xor a5, a4, a5
+; RV32IA-NEXT: and a5, a5, a2
+; RV32IA-NEXT: xor a5, a4, a5
+; RV32IA-NEXT: sc.w.rl a5, a5, (a3)
+; RV32IA-NEXT: bnez a5, .LBB0_1
+; RV32IA-NEXT: # %bb.2:
+; RV32IA-NEXT: srl a0, a4, a0
+; RV32IA-NEXT: ret
+;
+; RV32-LABEL: atomicrmw_add_i8:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: li a2, 5
+; RV32-NEXT: call __atomic_fetch_add_1
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_add_i8:
+; RV64IA-ZABHA: # %bb.0:
+; RV64IA-ZABHA-NEXT: amoadd.b.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i8:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: li a2, 255
+; RV64IA-NEXT: andi a3, a0, -4
+; RV64IA-NEXT: andi a0, a0, 3
+; RV64IA-NEXT: zext.b a1, a1
+; RV64IA-NEXT: slli a0, a0, 3
+; RV64IA-NEXT: sllw a2, a2, a0
+; RV64IA-NEXT: sllw a1, a1, a0
+; RV64IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a3)
+; RV64IA-NEXT: add a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a3)
+; RV64IA-NEXT: bnez a5, .LBB0_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a0
+; RV64IA-NEXT: ret
+;
+; RV64-LABEL: atomicrmw_add_i8:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: li a2, 5
+; RV64-NEXT: call __atomic_fetch_add_1
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %res = atomicrmw add ptr %ptr, i8 %rhs seq_cst
+ ret i8 %res
+}
+
+define i16 @atomicrmw_add_i16(ptr %ptr, i16 %rhs) {
+; RV32IA-ZABHA-LABEL: atomicrmw_add_i16:
+; RV32IA-ZABHA: # %bb.0:
+; RV32IA-ZABHA-NEXT: amoadd.h.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT: ret
+;
+; RV32IA-LABEL: atomicrmw_add_i16:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: lui a2, 16
+; RV32IA-NEXT: andi a3, a0, -4
+; RV32IA-NEXT: andi a0, a0, 3
+; RV32IA-NEXT: addi a2, a2, -1
+; RV32IA-NEXT: slli a0, a0, 3
+; RV32IA-NEXT: sll a4, a2, a0
+; RV32IA-NEXT: and a1, a1, a2
+; RV32IA-NEXT: sll a1, a1, a0
+; RV32IA-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT: lr.w.aqrl a2, (a3)
+; RV32IA-NEXT: add a5, a2, a1
+; RV32IA-NEXT: xor a5, a2, a5
+; RV32IA-NEXT: and a5, a5, a4
+; RV32IA-NEXT: xor a5, a2, a5
+; RV32IA-NEXT: sc.w.rl a5, a5, (a3)
+; RV32IA-NEXT: bnez a5, .LBB1_1
+; RV32IA-NEXT: # %bb.2:
+; RV32IA-NEXT: srl a0, a2, a0
+; RV32IA-NEXT: ret
+;
+; RV32-LABEL: atomicrmw_add_i16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: li a2, 5
+; RV32-NEXT: call __atomic_fetch_add_2
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_add_i16:
+; RV64IA-ZABHA: # %bb.0:
+; RV64IA-ZABHA-NEXT: amoadd.h.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i16:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: andi a3, a0, -4
+; RV64IA-NEXT: andi a0, a0, 3
+; RV64IA-NEXT: addi a2, a2, -1
+; RV64IA-NEXT: slli a0, a0, 3
+; RV64IA-NEXT: sllw a4, a2, a0
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: sllw a1, a1, a0
+; RV64IA-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a2, (a3)
+; RV64IA-NEXT: add a5, a2, a1
+; RV64IA-NEXT: xor a5, a2, a5
+; RV64IA-NEXT: and a5, a5, a4
+; RV64IA-NEXT: xor a5, a2, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a3)
+; RV64IA-NEXT: bnez a5, .LBB1_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a2, a0
+; RV64IA-NEXT: ret
+;
+; RV64-LABEL: atomicrmw_add_i16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: li a2, 5
+; RV64-NEXT: call __atomic_fetch_add_2
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %res = atomicrmw add ptr %ptr, i16 %rhs seq_cst
+ ret i16 %res
+}
+
+define i32 @atomicrmw_add_i32(ptr %ptr, i32 %rhs) {
+; RV32IA-ZABHA-LABEL: atomicrmw_add_i32:
+; RV32IA-ZABHA: # %bb.0:
+; RV32IA-ZABHA-NEXT: amoadd.w.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT: ret
+;
+; RV32IA-LABEL: atomicrmw_add_i32:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: amoadd.w.aqrl a0, a1, (a0)
+; RV32IA-NEXT: ret
+;
+; RV32-LABEL: atomicrmw_add_i32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: li a2, 5
+; RV32-NEXT: call __atomic_fetch_add_4
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_add_i32:
+; RV64IA-ZABHA: # %bb.0:
+; RV64IA-ZABHA-NEXT: amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i32:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
+;
+; RV64-LABEL: atomicrmw_add_i32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: li a2, 5
+; RV64-NEXT: call __atomic_fetch_add_4
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %res = atomicrmw add ptr %ptr, i32 %rhs seq_cst
+ ret i32 %res
+}
+
+define i64 @atomicrmw_add_i64(ptr %ptr, i64 %rhs) {
+; RV32IA-ZABHA-LABEL: atomicrmw_add_i64:
+; RV32IA-ZABHA: # %bb.0:
+; RV32IA-ZABHA-NEXT: addi sp, sp, -16
+; RV32IA-ZABHA-NEXT: .cfi_def_cfa_offset 16
+; RV32IA-ZABHA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-ZABHA-NEXT: .cfi_offset ra, -4
+; RV32IA-ZABHA-NEXT: li a3, 5
+; RV32IA-ZABHA-NEXT: call __atomic_fetch_add_8
+; RV32IA-ZABHA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-ZABHA-NEXT: .cfi_restore ra
+; RV32IA-ZABHA-NEXT: addi sp, sp, 16
+; RV32IA-ZABHA-NEXT: .cfi_def_cfa_offset 0
+; RV32IA-ZABHA-NEXT: ret
+;
+; RV32IA-LABEL: atomicrmw_add_i64:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: addi sp, sp, -16
+; RV32IA-NEXT: .cfi_def_cfa_offset 16
+; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: .cfi_offset ra, -4
+; RV32IA-NEXT: li a3, 5
+; RV32IA-NEXT: call __atomic_fetch_add_8
+; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: .cfi_restore ra
+; RV32IA-NEXT: addi sp, sp, 16
+; RV32IA-NEXT: .cfi_def_cfa_offset 0
+; RV32IA-NEXT: ret
+;
+; RV32-LABEL: atomicrmw_add_i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: li a3, 5
+; RV32-NEXT: call __atomic_fetch_add_8
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_add_i64:
+; RV64IA-ZABHA: # %bb.0:
+; RV64IA-ZABHA-NEXT: amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i64:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
+;
+; RV64-LABEL: atomicrmw_add_i64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: li a2, 5
+; RV64-NEXT: call __atomic_fetch_add_8
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %res = atomicrmw add ptr %ptr, i64 %rhs seq_cst
+ ret i64 %res
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv32.mir
new file mode 100644
index 0000000000000..07478686011d0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv32.mir
@@ -0,0 +1,73 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+a,+zabha -run-pass=instruction-select -verify-machineinstrs %s -o - \
+# RUN: | FileCheck %s
+
+---
+name: atomicrmw_add_i8_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i8_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; CHECK-NEXT: [[AMOADD_B:%[0-9]+]]:gpr = AMOADD_B [[COPY]], [[COPY1]] :: (load store monotonic (s8))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_B]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s32) = COPY $x11
+ %6:gprb(s32) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s8))
+ $x10 = COPY %6(s32)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i16_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i16_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; CHECK-NEXT: [[AMOADD_H:%[0-9]+]]:gpr = AMOADD_H [[COPY]], [[COPY1]] :: (load store monotonic (s16))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_H]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s32) = COPY $x11
+ %6:gprb(s32) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s16))
+ $x10 = COPY %6(s32)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i32_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i32_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; CHECK-NEXT: [[AMOADD_W:%[0-9]+]]:gpr = AMOADD_W [[COPY]], [[COPY1]] :: (load store monotonic (s32))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_W]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s32) = COPY $x11
+ %2:gprb(s32) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s32))
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv64.mir
new file mode 100644
index 0000000000000..2e54a42748aaa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv64.mir
@@ -0,0 +1,96 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+a,+zabha -run-pass=instruction-select -verify-machineinstrs %s -o - \
+# RUN: | FileCheck %s
+
+---
+name: atomicrmw_add_i8_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i8_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; CHECK-NEXT: [[AMOADD_B:%[0-9]+]]:gpr = AMOADD_B [[COPY]], [[COPY1]] :: (load store monotonic (s8))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_B]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s64) = COPY $x11
+ %6:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s8))
+ $x10 = COPY %6(s64)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i16_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i16_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; CHECK-NEXT: [[AMOADD_H:%[0-9]+]]:gpr = AMOADD_H [[COPY]], [[COPY1]] :: (load store monotonic (s16))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_H]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s64) = COPY $x11
+ %6:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s16))
+ $x10 = COPY %6(s64)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i32_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i32_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; CHECK-NEXT: [[AMOADD_W:%[0-9]+]]:gpr = AMOADD_W [[COPY]], [[COPY1]] :: (load store monotonic (s32))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_W]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s64) = COPY $x11
+ %6:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s32))
+ $x10 = COPY %6(s64)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i64_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i64_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; CHECK-NEXT: [[AMOADD_D:%[0-9]+]]:gpr = AMOADD_D [[COPY]], [[COPY1]] :: (load store monotonic (s64))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_D]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s64) = COPY $x11
+ %2:gprb(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s64))
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
index 82cc6829838a0..c47ce4d5f1153 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
@@ -222,8 +222,8 @@
# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_ATOMICRMW_ADD (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
-# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. the first uncovered type index: 2, OK
+# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
# DEBUG-NEXT: G_ATOMICRMW_SUB (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv32.mir
new file mode 100644
index 0000000000000..034d4181e3e39
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv32.mir
@@ -0,0 +1,121 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+a,+zabha -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefixes=RV32IA-ZABHA
+# RUN: llc -mtriple=riscv32 -mattr=+a -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefixes=RV32IA
+
+---
+name: atomicrmw_add_i8_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i8_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
+ ; CHECK-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ ; RV32IA-ZABHA-LABEL: name: atomicrmw_add_i8_monotonic
+ ; RV32IA-ZABHA: liveins: $x10, $x11
+ ; RV32IA-ZABHA-NEXT: {{ $}}
+ ; RV32IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
+ ; RV32IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+ ; RV32IA-ZABHA-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV32IA-LABEL: name: atomicrmw_add_i8_monotonic
+ ; RV32IA: liveins: $x10, $x11
+ ; RV32IA-NEXT: {{ $}}
+ ; RV32IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32IA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
+ ; RV32IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+ ; RV32IA-NEXT: PseudoRET implicit $x10
+ %0:_(p0) = COPY $x10
+ %1:_(s32) = COPY $x11
+ %2:_(s8) = G_TRUNC %1(s32)
+ %3:_(s8) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s8))
+ %4:_(s32) = G_ANYEXT %3(s8)
+ $x10 = COPY %4(s32)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i16_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i16_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
+ ; CHECK-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ ; RV32IA-ZABHA-LABEL: name: atomicrmw_add_i16_monotonic
+ ; RV32IA-ZABHA: liveins: $x10, $x11
+ ; RV32IA-ZABHA-NEXT: {{ $}}
+ ; RV32IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
+ ; RV32IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+ ; RV32IA-ZABHA-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV32IA-LABEL: name: atomicrmw_add_i16_monotonic
+ ; RV32IA: liveins: $x10, $x11
+ ; RV32IA-NEXT: {{ $}}
+ ; RV32IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32IA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
+ ; RV32IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+ ; RV32IA-NEXT: PseudoRET implicit $x10
+ %0:_(p0) = COPY $x10
+ %1:_(s32) = COPY $x11
+ %2:_(s16) = G_TRUNC %1(s32)
+ %3:_(s16) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s16))
+ %4:_(s32) = G_ANYEXT %3(s16)
+ $x10 = COPY %4(s32)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i32_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i32_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
+ ; CHECK-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ ; RV32IA-ZABHA-LABEL: name: atomicrmw_add_i32_monotonic
+ ; RV32IA-ZABHA: liveins: $x10, $x11
+ ; RV32IA-ZABHA-NEXT: {{ $}}
+ ; RV32IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
+ ; RV32IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+ ; RV32IA-ZABHA-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV32IA-LABEL: name: atomicrmw_add_i32_monotonic
+ ; RV32IA: liveins: $x10, $x11
+ ; RV32IA-NEXT: {{ $}}
+ ; RV32IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32IA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
+ ; RV32IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+ ; RV32IA-NEXT: PseudoRET implicit $x10
+ %0:_(p0) = COPY $x10
+ %1:_(s32) = COPY $x11
+ %2:_(s32) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s32))
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv64.mir
new file mode 100644
index 0000000000000..c3c3afcccda06
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv64.mir
@@ -0,0 +1,160 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+a,+zabha -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefixes=RV64IA-ZABHA
+# RUN: llc -mtriple=riscv64 -mattr=+a -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefixes=RV64IA
+
+---
+name: atomicrmw_add_i8_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i8_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
+ ; CHECK-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ ; RV64IA-ZABHA-LABEL: name: atomicrmw_add_i8_monotonic
+ ; RV64IA-ZABHA: liveins: $x10, $x11
+ ; RV64IA-ZABHA-NEXT: {{ $}}
+ ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
+ ; RV64IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+ ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV64IA-LABEL: name: atomicrmw_add_i8_monotonic
+ ; RV64IA: liveins: $x10, $x11
+ ; RV64IA-NEXT: {{ $}}
+ ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s8))
+ ; RV64IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+ ; RV64IA-NEXT: PseudoRET implicit $x10
+ %0:_(p0) = COPY $x10
+ %1:_(s64) = COPY $x11
+ %2:_(s8) = G_TRUNC %1(s64)
+ %3:_(s8) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s8))
+ %4:_(s64) = G_ANYEXT %3(s8)
+ $x10 = COPY %4(s64)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i16_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i16_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
+ ; CHECK-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ ; RV64IA-ZABHA-LABEL: name: atomicrmw_add_i16_monotonic
+ ; RV64IA-ZABHA: liveins: $x10, $x11
+ ; RV64IA-ZABHA-NEXT: {{ $}}
+ ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
+ ; RV64IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+ ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV64IA-LABEL: name: atomicrmw_add_i16_monotonic
+ ; RV64IA: liveins: $x10, $x11
+ ; RV64IA-NEXT: {{ $}}
+ ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s16))
+ ; RV64IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+ ; RV64IA-NEXT: PseudoRET implicit $x10
+ %0:_(p0) = COPY $x10
+ %1:_(s64) = COPY $x11
+ %2:_(s16) = G_TRUNC %1(s64)
+ %3:_(s16) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s16))
+ %4:_(s64) = G_ANYEXT %3(s16)
+ $x10 = COPY %4(s64)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i32_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i32_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
+ ; CHECK-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ ; RV64IA-ZABHA-LABEL: name: atomicrmw_add_i32_monotonic
+ ; RV64IA-ZABHA: liveins: $x10, $x11
+ ; RV64IA-ZABHA-NEXT: {{ $}}
+ ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
+ ; RV64IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+ ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV64IA-LABEL: name: atomicrmw_add_i32_monotonic
+ ; RV64IA: liveins: $x10, $x11
+ ; RV64IA-NEXT: {{ $}}
+ ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s32))
+ ; RV64IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+ ; RV64IA-NEXT: PseudoRET implicit $x10
+ %0:_(p0) = COPY $x10
+ %1:_(s64) = COPY $x11
+ %2:_(s32) = G_TRUNC %1(s64)
+ %3:_(s32) = G_ATOMICRMW_ADD %0(p0), %2 :: (load store monotonic (s32))
+ %4:_(s64) = G_ANYEXT %3(s32)
+ $x10 = COPY %4(s64)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i64_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: atomicrmw_add_i64_monotonic
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s64))
+ ; CHECK-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ ; RV64IA-ZABHA-LABEL: name: atomicrmw_add_i64_monotonic
+ ; RV64IA-ZABHA: liveins: $x10, $x11
+ ; RV64IA-ZABHA-NEXT: {{ $}}
+ ; RV64IA-ZABHA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64IA-ZABHA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64IA-ZABHA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s64))
+ ; RV64IA-ZABHA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+ ; RV64IA-ZABHA-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV64IA-LABEL: name: atomicrmw_add_i64_monotonic
+ ; RV64IA: liveins: $x10, $x11
+ ; RV64IA-NEXT: {{ $}}
+ ; RV64IA-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64IA-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64IA-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[COPY1]] :: (load store monotonic (s64))
+ ; RV64IA-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+ ; RV64IA-NEXT: PseudoRET implicit $x10
+ %0:_(p0) = COPY $x10
+ %1:_(s64) = COPY $x11
+ %2:_(s64) = G_ATOMICRMW_ADD %0(p0), %1 :: (load store monotonic (s64))
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+...