[llvm] 3521677 - [RISCV] Add pre-commit tests for D158759
Yingwei Zheng via llvm-commits
llvm-commits@lists.llvm.org
Wed Aug 30 00:37:22 PDT 2023
Author: Yingwei Zheng
Date: 2023-08-30T15:36:47+08:00
New Revision: 3521677994aba7239dc8c12aa854fe9a26081a23
URL: https://github.com/llvm/llvm-project/commit/3521677994aba7239dc8c12aa854fe9a26081a23
DIFF: https://github.com/llvm/llvm-project/commit/3521677994aba7239dc8c12aa854fe9a26081a23.diff
LOG: [RISCV] Add pre-commit tests for D158759
Added:
llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll b/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll
new file mode 100644
index 00000000000000..e952e71a5d736d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll
@@ -0,0 +1,474 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -O3 -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64 %s
+
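+; These functions discard the result of each atomicrmw, so the AMO's
+; destination register is dead. seq_cst maps to the .aqrl ordering suffix
+; on the AMO. The checks capture current codegen (destination a0) as a
+; baseline; D158759 is presumably going to write the dead result to x0.
+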
+define void @amoswap_w_discard(ptr %a, i32 %b) nounwind {
+; RV32-LABEL: amoswap_w_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: amoswap.w.aqrl a0, a1, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amoswap_w_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amoswap.w.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i32 %b seq_cst
+ ret void
+}
+
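+; On RV32, the A extension only provides 32-bit AMOs, so i64 atomicrmw
+; operations lower to __atomic_* libcalls; the "li a3, 5" argument is the
+; seq_cst memory ordering (__ATOMIC_SEQ_CST = 5).
+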
+define void @amoswap_d_discard(ptr %a, i64 %b) nounwind {
+; RV32-LABEL: amoswap_d_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: li a3, 5
+; RV32-NEXT: call __atomic_exchange_8@plt
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amoswap_d_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amoswap.d.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i64 %b seq_cst
+ ret void
+}
+
+define void @amoadd_w_discard(ptr %a, i32 %b) nounwind {
+; RV32-LABEL: amoadd_w_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: amoadd.w.aqrl a0, a1, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amoadd_w_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amoadd.w.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw add ptr %a, i32 %b seq_cst
+ ret void
+}
+
+define void @amoadd_d_discard(ptr %a, i64 %b) nounwind {
+; RV32-LABEL: amoadd_d_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: li a3, 5
+; RV32-NEXT: call __atomic_fetch_add_8@plt
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amoadd_d_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amoadd.d.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw add ptr %a, i64 %b seq_cst
+ ret void
+}
+
+define void @amoand_w_discard(ptr %a, i32 %b) nounwind {
+; RV32-LABEL: amoand_w_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: amoand.w.aqrl a0, a1, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amoand_w_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amoand.w.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw and ptr %a, i32 %b seq_cst
+ ret void
+}
+
+define void @amoand_d_discard(ptr %a, i64 %b) nounwind {
+; RV32-LABEL: amoand_d_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: li a3, 5
+; RV32-NEXT: call __atomic_fetch_and_8@plt
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amoand_d_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amoand.d.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw and ptr %a, i64 %b seq_cst
+ ret void
+}
+
+define void @amoor_w_discard(ptr %a, i32 %b) nounwind {
+; RV32-LABEL: amoor_w_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: amoor.w.aqrl a0, a1, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amoor_w_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amoor.w.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw or ptr %a, i32 %b seq_cst
+ ret void
+}
+
+define void @amoor_d_discard(ptr %a, i64 %b) nounwind {
+; RV32-LABEL: amoor_d_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: li a3, 5
+; RV32-NEXT: call __atomic_fetch_or_8@plt
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amoor_d_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amoor.d.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw or ptr %a, i64 %b seq_cst
+ ret void
+}
+
+define void @amoxor_w_discard(ptr %a, i32 %b) nounwind {
+; RV32-LABEL: amoxor_w_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: amoxor.w.aqrl a0, a1, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amoxor_w_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amoxor.w.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw xor ptr %a, i32 %b seq_cst
+ ret void
+}
+
+define void @amoxor_d_discard(ptr %a, i64 %b) nounwind {
+; RV32-LABEL: amoxor_d_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: li a3, 5
+; RV32-NEXT: call __atomic_fetch_xor_8@plt
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amoxor_d_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amoxor.d.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw xor ptr %a, i64 %b seq_cst
+ ret void
+}
+
+define void @amomax_w_discard(ptr %a, i32 %b) nounwind {
+; RV32-LABEL: amomax_w_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: amomax.w.aqrl a0, a1, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amomax_w_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amomax.w.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw max ptr %a, i32 %b seq_cst
+ ret void
+}
+
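+; There is no __atomic_fetch_max/min libcall, so i64 signed/unsigned min/max
+; on RV32 expand to a compare-exchange loop around __atomic_compare_exchange_8.
+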
+define void @amomax_d_discard(ptr %a, i64 %b) nounwind {
+; RV32-LABEL: amomax_d_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: lw a4, 4(a0)
+; RV32-NEXT: lw a5, 0(a0)
+; RV32-NEXT: mv s1, a2
+; RV32-NEXT: mv s2, a1
+; RV32-NEXT: j .LBB11_2
+; RV32-NEXT: .LBB11_1: # %atomicrmw.start
+; RV32-NEXT: # in Loop: Header=BB11_2 Depth=1
+; RV32-NEXT: sw a5, 8(sp)
+; RV32-NEXT: sw a4, 12(sp)
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: li a4, 5
+; RV32-NEXT: li a5, 5
+; RV32-NEXT: mv a0, s0
+; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: lw a4, 12(sp)
+; RV32-NEXT: lw a5, 8(sp)
+; RV32-NEXT: bnez a0, .LBB11_6
+; RV32-NEXT: .LBB11_2: # %atomicrmw.start
+; RV32-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32-NEXT: beq a4, s1, .LBB11_4
+; RV32-NEXT: # %bb.3: # %atomicrmw.start
+; RV32-NEXT: # in Loop: Header=BB11_2 Depth=1
+; RV32-NEXT: slt a0, s1, a4
+; RV32-NEXT: mv a2, a5
+; RV32-NEXT: mv a3, a4
+; RV32-NEXT: bnez a0, .LBB11_1
+; RV32-NEXT: j .LBB11_5
+; RV32-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1
+; RV32-NEXT: sltu a0, s2, a5
+; RV32-NEXT: mv a2, a5
+; RV32-NEXT: mv a3, a4
+; RV32-NEXT: bnez a0, .LBB11_1
+; RV32-NEXT: .LBB11_5: # %atomicrmw.start
+; RV32-NEXT: # in Loop: Header=BB11_2 Depth=1
+; RV32-NEXT: mv a2, s2
+; RV32-NEXT: mv a3, s1
+; RV32-NEXT: j .LBB11_1
+; RV32-NEXT: .LBB11_6: # %atomicrmw.end
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amomax_d_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amomax.d.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw max ptr %a, i64 %b seq_cst
+ ret void
+}
+
+define void @amomaxu_w_discard(ptr %a, i32 %b) nounwind {
+; RV32-LABEL: amomaxu_w_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: amomaxu.w.aqrl a0, a1, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amomaxu_w_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amomaxu.w.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i32 %b seq_cst
+ ret void
+}
+
+define void @amomaxu_d_discard(ptr %a, i64 %b) nounwind {
+; RV32-LABEL: amomaxu_d_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: lw a4, 4(a0)
+; RV32-NEXT: lw a5, 0(a0)
+; RV32-NEXT: mv s1, a2
+; RV32-NEXT: mv s2, a1
+; RV32-NEXT: j .LBB13_2
+; RV32-NEXT: .LBB13_1: # %atomicrmw.start
+; RV32-NEXT: # in Loop: Header=BB13_2 Depth=1
+; RV32-NEXT: sw a5, 8(sp)
+; RV32-NEXT: sw a4, 12(sp)
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: li a4, 5
+; RV32-NEXT: li a5, 5
+; RV32-NEXT: mv a0, s0
+; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: lw a4, 12(sp)
+; RV32-NEXT: lw a5, 8(sp)
+; RV32-NEXT: bnez a0, .LBB13_6
+; RV32-NEXT: .LBB13_2: # %atomicrmw.start
+; RV32-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32-NEXT: beq a4, s1, .LBB13_4
+; RV32-NEXT: # %bb.3: # %atomicrmw.start
+; RV32-NEXT: # in Loop: Header=BB13_2 Depth=1
+; RV32-NEXT: sltu a0, s1, a4
+; RV32-NEXT: mv a2, a5
+; RV32-NEXT: mv a3, a4
+; RV32-NEXT: bnez a0, .LBB13_1
+; RV32-NEXT: j .LBB13_5
+; RV32-NEXT: .LBB13_4: # in Loop: Header=BB13_2 Depth=1
+; RV32-NEXT: sltu a0, s2, a5
+; RV32-NEXT: mv a2, a5
+; RV32-NEXT: mv a3, a4
+; RV32-NEXT: bnez a0, .LBB13_1
+; RV32-NEXT: .LBB13_5: # %atomicrmw.start
+; RV32-NEXT: # in Loop: Header=BB13_2 Depth=1
+; RV32-NEXT: mv a2, s2
+; RV32-NEXT: mv a3, s1
+; RV32-NEXT: j .LBB13_1
+; RV32-NEXT: .LBB13_6: # %atomicrmw.end
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amomaxu_d_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amomaxu.d.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i64 %b seq_cst
+ ret void
+}
+
+define void @amomin_w_discard(ptr %a, i32 %b) nounwind {
+; RV32-LABEL: amomin_w_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: amomin.w.aqrl a0, a1, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amomin_w_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amomin.w.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw min ptr %a, i32 %b seq_cst
+ ret void
+}
+
+define void @amomin_d_discard(ptr %a, i64 %b) nounwind {
+; RV32-LABEL: amomin_d_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: lw a4, 4(a0)
+; RV32-NEXT: lw a5, 0(a0)
+; RV32-NEXT: mv s1, a2
+; RV32-NEXT: mv s2, a1
+; RV32-NEXT: j .LBB15_2
+; RV32-NEXT: .LBB15_1: # %atomicrmw.start
+; RV32-NEXT: # in Loop: Header=BB15_2 Depth=1
+; RV32-NEXT: sw a5, 8(sp)
+; RV32-NEXT: sw a4, 12(sp)
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: li a4, 5
+; RV32-NEXT: li a5, 5
+; RV32-NEXT: mv a0, s0
+; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: lw a4, 12(sp)
+; RV32-NEXT: lw a5, 8(sp)
+; RV32-NEXT: bnez a0, .LBB15_6
+; RV32-NEXT: .LBB15_2: # %atomicrmw.start
+; RV32-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32-NEXT: beq a4, s1, .LBB15_4
+; RV32-NEXT: # %bb.3: # %atomicrmw.start
+; RV32-NEXT: # in Loop: Header=BB15_2 Depth=1
+; RV32-NEXT: slt a0, s1, a4
+; RV32-NEXT: mv a2, a5
+; RV32-NEXT: mv a3, a4
+; RV32-NEXT: beqz a0, .LBB15_1
+; RV32-NEXT: j .LBB15_5
+; RV32-NEXT: .LBB15_4: # in Loop: Header=BB15_2 Depth=1
+; RV32-NEXT: sltu a0, s2, a5
+; RV32-NEXT: mv a2, a5
+; RV32-NEXT: mv a3, a4
+; RV32-NEXT: beqz a0, .LBB15_1
+; RV32-NEXT: .LBB15_5: # %atomicrmw.start
+; RV32-NEXT: # in Loop: Header=BB15_2 Depth=1
+; RV32-NEXT: mv a2, s2
+; RV32-NEXT: mv a3, s1
+; RV32-NEXT: j .LBB15_1
+; RV32-NEXT: .LBB15_6: # %atomicrmw.end
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amomin_d_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amomin.d.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw min ptr %a, i64 %b seq_cst
+ ret void
+}
+
+define void @amominu_w_discard(ptr %a, i32 %b) nounwind {
+; RV32-LABEL: amominu_w_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: amominu.w.aqrl a0, a1, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amominu_w_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amominu.w.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i32 %b seq_cst
+ ret void
+}
+
+define void @amominu_d_discard(ptr %a, i64 %b) nounwind {
+; RV32-LABEL: amominu_d_discard:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: lw a4, 4(a0)
+; RV32-NEXT: lw a5, 0(a0)
+; RV32-NEXT: mv s1, a2
+; RV32-NEXT: mv s2, a1
+; RV32-NEXT: j .LBB17_2
+; RV32-NEXT: .LBB17_1: # %atomicrmw.start
+; RV32-NEXT: # in Loop: Header=BB17_2 Depth=1
+; RV32-NEXT: sw a5, 8(sp)
+; RV32-NEXT: sw a4, 12(sp)
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: li a4, 5
+; RV32-NEXT: li a5, 5
+; RV32-NEXT: mv a0, s0
+; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: lw a4, 12(sp)
+; RV32-NEXT: lw a5, 8(sp)
+; RV32-NEXT: bnez a0, .LBB17_6
+; RV32-NEXT: .LBB17_2: # %atomicrmw.start
+; RV32-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32-NEXT: beq a4, s1, .LBB17_4
+; RV32-NEXT: # %bb.3: # %atomicrmw.start
+; RV32-NEXT: # in Loop: Header=BB17_2 Depth=1
+; RV32-NEXT: sltu a0, s1, a4
+; RV32-NEXT: mv a2, a5
+; RV32-NEXT: mv a3, a4
+; RV32-NEXT: beqz a0, .LBB17_1
+; RV32-NEXT: j .LBB17_5
+; RV32-NEXT: .LBB17_4: # in Loop: Header=BB17_2 Depth=1
+; RV32-NEXT: sltu a0, s2, a5
+; RV32-NEXT: mv a2, a5
+; RV32-NEXT: mv a3, a4
+; RV32-NEXT: beqz a0, .LBB17_1
+; RV32-NEXT: .LBB17_5: # %atomicrmw.start
+; RV32-NEXT: # in Loop: Header=BB17_2 Depth=1
+; RV32-NEXT: mv a2, s2
+; RV32-NEXT: mv a3, s1
+; RV32-NEXT: j .LBB17_1
+; RV32-NEXT: .LBB17_6: # %atomicrmw.end
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: amominu_d_discard:
+; RV64: # %bb.0:
+; RV64-NEXT: amominu.d.aqrl a0, a1, (a0)
+; RV64-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i64 %b seq_cst
+ ret void
+}