[llvm] [RISCV][GlobalISel] Legalize and select G_ATOMICRMW_ADD instruction (PR #153791)
Kane Wang via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 20 20:25:23 PDT 2025
https://github.com/ReVe1uv updated https://github.com/llvm/llvm-project/pull/153791
From dc2684d7c88a931cd10ad03de99e7e5fd645708e Mon Sep 17 00:00:00 2001
From: Kane Wang <kanewang95 at foxmail.com>
Date: Fri, 15 Aug 2025 19:25:32 +0800
Subject: [PATCH] [RISCV][GlobalISel] Legalize and select G_ATOMICRMW_ADD
instruction
This patch adds legalization and instruction selection support for the G_ATOMICRMW_ADD opcode in the RISC-V GlobalISel backend: with the A extension, XLen-wide atomic adds are selected to native AMO instructions, and without it they are lowered to __atomic_fetch_add_* libcalls.
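In outline, the new rule in RISCVLegalizerInfo.cpp works as follows. Below is a commented restatement of the hunk (the comments are editorial gloss inferred from the accompanying tests, not part of the patch itself):

  getActionDefinitionsBuilder(G_ATOMICRMW_ADD)
      // With the A extension, an XLen-wide atomic add on a plain pointer is
      // legal and selects to a native AMO (amoadd.w on RV32, amoadd.w/.d on
      // RV64).
      .legalFor(ST.hasStdExtA(), {{sXLen, p0}})
      // Without the A extension, every width is lowered to a
      // __atomic_fetch_add_{1,2,4,8} libcall.
      .libcallFor(!ST.hasStdExtA(), {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
      // Narrower value types are widened to XLen; the memory operand keeps
      // its original size (s8/s16), which Zabha then selects as
      // amoadd.b/amoadd.h.
      .clampScalar(0, sXLen, sXLen);

Note that on RV32 the i64 case still ends up in __atomic_fetch_add_8 even with +a, as the atomicrmw-add.ll test below shows.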
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 6 +
.../CodeGen/RISCV/GlobalISel/atomicrmw-add.ll | 182 ++++++++++++++++++
.../instruction-select/atomicrmw-add-rv32.mir | 72 +++++++
.../instruction-select/atomicrmw-add-rv64.mir | 95 +++++++++
.../GlobalISel/legalizer-info-validation.mir | 4 +-
.../legalizer/legalize-atomicrmw-add-rv32.mir | 63 ++++++
.../legalizer/legalize-atomicrmw-add-rv64.mir | 86 +++++++++
7 files changed, 506 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add.ll
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv32.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv64.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv32.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv64.mir
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index e88f33d6859ec..488bec53be1c8 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -692,6 +692,12 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.customIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)));
+ // FIXME: Support s8/s16 atomics under ExtA without requiring ExtZabha.
+ getActionDefinitionsBuilder(G_ATOMICRMW_ADD)
+ .legalFor(ST.hasStdExtA(), {{sXLen, p0}})
+ .libcallFor(!ST.hasStdExtA(), {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
+ .clampScalar(0, sXLen, sXLen);
+
getLegacyLegalizerInfo().computeTables();
verify(*ST.getInstrInfo());
}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add.ll b/llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add.ll
new file mode 100644
index 0000000000000..6bb9a3c014eb2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/atomicrmw-add.ll
@@ -0,0 +1,182 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+a,+zabha -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32IA-ZABHA
+; RUN: llc -mtriple=riscv32 -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zabha -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64IA-ZABHA
+; RUN: llc -mtriple=riscv64 -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
+
+define i8 @atomicrmw_add_i8(ptr %ptr, i8 %rhs) {
+; RV32IA-ZABHA-LABEL: atomicrmw_add_i8:
+; RV32IA-ZABHA: # %bb.0:
+; RV32IA-ZABHA-NEXT: amoadd.b.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT: ret
+;
+; RV32-LABEL: atomicrmw_add_i8:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: li a2, 5
+; RV32-NEXT: call __atomic_fetch_add_1
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_add_i8:
+; RV64IA-ZABHA: # %bb.0:
+; RV64IA-ZABHA-NEXT: amoadd.b.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT: ret
+;
+; RV64-LABEL: atomicrmw_add_i8:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: li a2, 5
+; RV64-NEXT: call __atomic_fetch_add_1
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %res = atomicrmw add ptr %ptr, i8 %rhs seq_cst
+ ret i8 %res
+}
+
+define i16 @atomicrmw_add_i16(ptr %ptr, i16 %rhs) {
+; RV32IA-ZABHA-LABEL: atomicrmw_add_i16:
+; RV32IA-ZABHA: # %bb.0:
+; RV32IA-ZABHA-NEXT: amoadd.h.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT: ret
+;
+; RV32-LABEL: atomicrmw_add_i16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: li a2, 5
+; RV32-NEXT: call __atomic_fetch_add_2
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_add_i16:
+; RV64IA-ZABHA: # %bb.0:
+; RV64IA-ZABHA-NEXT: amoadd.h.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT: ret
+;
+; RV64-LABEL: atomicrmw_add_i16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: li a2, 5
+; RV64-NEXT: call __atomic_fetch_add_2
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %res = atomicrmw add ptr %ptr, i16 %rhs seq_cst
+ ret i16 %res
+}
+
+define i32 @atomicrmw_add_i32(ptr %ptr, i32 %rhs) {
+; RV32IA-ZABHA-LABEL: atomicrmw_add_i32:
+; RV32IA-ZABHA: # %bb.0:
+; RV32IA-ZABHA-NEXT: amoadd.w.aqrl a0, a1, (a0)
+; RV32IA-ZABHA-NEXT: ret
+;
+; RV32-LABEL: atomicrmw_add_i32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: li a2, 5
+; RV32-NEXT: call __atomic_fetch_add_4
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_add_i32:
+; RV64IA-ZABHA: # %bb.0:
+; RV64IA-ZABHA-NEXT: amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT: ret
+;
+; RV64-LABEL: atomicrmw_add_i32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: li a2, 5
+; RV64-NEXT: call __atomic_fetch_add_4
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %res = atomicrmw add ptr %ptr, i32 %rhs seq_cst
+ ret i32 %res
+}
+
+define i64 @atomicrmw_add_i64(ptr %ptr, i64 %rhs) {
+; RV32IA-ZABHA-LABEL: atomicrmw_add_i64:
+; RV32IA-ZABHA: # %bb.0:
+; RV32IA-ZABHA-NEXT: addi sp, sp, -16
+; RV32IA-ZABHA-NEXT: .cfi_def_cfa_offset 16
+; RV32IA-ZABHA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-ZABHA-NEXT: .cfi_offset ra, -4
+; RV32IA-ZABHA-NEXT: li a3, 5
+; RV32IA-ZABHA-NEXT: call __atomic_fetch_add_8
+; RV32IA-ZABHA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-ZABHA-NEXT: .cfi_restore ra
+; RV32IA-ZABHA-NEXT: addi sp, sp, 16
+; RV32IA-ZABHA-NEXT: .cfi_def_cfa_offset 0
+; RV32IA-ZABHA-NEXT: ret
+;
+; RV32-LABEL: atomicrmw_add_i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: li a3, 5
+; RV32-NEXT: call __atomic_fetch_add_8
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64IA-ZABHA-LABEL: atomicrmw_add_i64:
+; RV64IA-ZABHA: # %bb.0:
+; RV64IA-ZABHA-NEXT: amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-ZABHA-NEXT: ret
+;
+; RV64-LABEL: atomicrmw_add_i64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: li a2, 5
+; RV64-NEXT: call __atomic_fetch_add_8
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %res = atomicrmw add ptr %ptr, i64 %rhs seq_cst
+ ret i64 %res
+}
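For reference, the i32 case above corresponds to roughly the following C++ source (a hypothetical illustration of how such IR arises, not part of the patch):

  #include <atomic>

  // Compiles to: atomicrmw add ptr %p, i32 %v seq_cst,
  // matching atomicrmw_add_i32 in the test above.
  int fetch_add_i32(std::atomic<int> *p, int v) {
    return p->fetch_add(v, std::memory_order_seq_cst);
  }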
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv32.mir
new file mode 100644
index 0000000000000..fb69d67a0381f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv32.mir
@@ -0,0 +1,72 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+a,+zabha -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: atomicrmw_add_i8_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: atomicrmw_add_i8_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: [[AMOADD_B:%[0-9]+]]:gpr = AMOADD_B [[COPY]], [[ADDI]] :: (load store monotonic (s8))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_B]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s32) = G_CONSTANT i32 1
+ %2:gprb(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s8))
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i16_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: atomicrmw_add_i16_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: [[AMOADD_H:%[0-9]+]]:gpr = AMOADD_H [[COPY]], [[ADDI]] :: (load store monotonic (s16))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_H]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s32) = G_CONSTANT i32 1
+ %2:gprb(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s16))
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i32_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: atomicrmw_add_i32_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: [[AMOADD_W:%[0-9]+]]:gpr = AMOADD_W [[COPY]], [[ADDI]] :: (load store monotonic (s32))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_W]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s32) = G_CONSTANT i32 1
+ %2:gprb(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s32))
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv64.mir
new file mode 100644
index 0000000000000..e3b38de4e1b46
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/atomicrmw-add-rv64.mir
@@ -0,0 +1,95 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+a,+zabha -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: atomicrmw_add_i8_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: atomicrmw_add_i8_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: [[AMOADD_B:%[0-9]+]]:gpr = AMOADD_B [[COPY]], [[ADDI]] :: (load store monotonic (s8))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_B]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s64) = G_CONSTANT i64 1
+ %2:gprb(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s8))
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i16_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: atomicrmw_add_i16_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: [[AMOADD_H:%[0-9]+]]:gpr = AMOADD_H [[COPY]], [[ADDI]] :: (load store monotonic (s16))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_H]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s64) = G_CONSTANT i64 1
+ %2:gprb(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s16))
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i32_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: atomicrmw_add_i32_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: [[AMOADD_W:%[0-9]+]]:gpr = AMOADD_W [[COPY]], [[ADDI]] :: (load store monotonic (s32))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_W]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s64) = G_CONSTANT i64 1
+ %2:gprb(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s32))
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i64_monotonic
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: atomicrmw_add_i64_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: [[AMOADD_D:%[0-9]+]]:gpr = AMOADD_D [[COPY]], [[ADDI]] :: (load store monotonic (s64))
+ ; CHECK-NEXT: $x10 = COPY [[AMOADD_D]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s64) = G_CONSTANT i64 1
+ %2:gprb(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s64))
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
index 82cc6829838a0..c47ce4d5f1153 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
@@ -222,8 +222,8 @@
# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_ATOMICRMW_ADD (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
-# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. the first uncovered type index: 2, OK
+# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
# DEBUG-NEXT: G_ATOMICRMW_SUB (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv32.mir
new file mode 100644
index 0000000000000..7dec5f36222d4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv32.mir
@@ -0,0 +1,63 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+a,+zabha -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: atomicrmw_add_i8_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: atomicrmw_add_i8_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s8))
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY [[ATOMICRMW_ADD]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s8) = G_CONSTANT i8 1
+ %2:gprb(s8) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s8))
+ %3:gprb(s32) = G_ANYEXT %2
+ $x10 = COPY %3(s32)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i16_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: atomicrmw_add_i16_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s16))
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY [[ATOMICRMW_ADD]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s16) = G_CONSTANT i16 1
+ %2:gprb(s16) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s16))
+ %3:gprb(s32) = G_ANYEXT %2
+ $x10 = COPY %3(s32)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i32_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: atomicrmw_add_i32_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:gprb(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s32))
+ ; CHECK-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s32) = G_CONSTANT i32 1
+ %2:gprb(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s32))
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv64.mir
new file mode 100644
index 0000000000000..b9cc8c0ae2a0c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-atomicrmw-add-rv64.mir
@@ -0,0 +1,86 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+a,+zabha -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: atomicrmw_add_i8_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: atomicrmw_add_i8_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s8))
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprb(s64) = COPY [[ATOMICRMW_ADD]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY1]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s8) = G_CONSTANT i8 1
+ %2:gprb(s8) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s8))
+ %3:gprb(s64) = G_ANYEXT %2
+ $x10 = COPY %3(s64)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i16_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: atomicrmw_add_i16_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s16))
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprb(s64) = COPY [[ATOMICRMW_ADD]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY1]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s16) = G_CONSTANT i16 1
+ %2:gprb(s16) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s16))
+ %3:gprb(s64) = G_ANYEXT %2
+ $x10 = COPY %3(s64)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i32_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: atomicrmw_add_i32_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s32))
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprb(s64) = COPY [[ATOMICRMW_ADD]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY1]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s32) = G_CONSTANT i32 1
+ %2:gprb(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s32))
+ %3:gprb(s64) = G_ANYEXT %2(s32)
+ $x10 = COPY %3(s64)
+ PseudoRET implicit $x10
+...
+---
+name: atomicrmw_add_i64_monotonic
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: atomicrmw_add_i64_monotonic
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:gprb(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s64))
+ ; CHECK-NEXT: $x10 = COPY [[ATOMICRMW_ADD]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(p0) = COPY $x10
+ %1:gprb(s64) = G_CONSTANT i64 1
+ %2:gprb(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s64))
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+...