[llvm] 8e3cde0 - [LoongArch][test] Add floating-point atomic load/store tests. NFC
WANG Rui via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 25 00:39:00 PDT 2024
Author: WANG Rui
Date: 2024-09-25T15:39:22+08:00
New Revision: 8e3cde04cbf13d2aa2251acdd8adbae8d3edd43d
URL: https://github.com/llvm/llvm-project/commit/8e3cde04cbf13d2aa2251acdd8adbae8d3edd43d
DIFF: https://github.com/llvm/llvm-project/commit/8e3cde04cbf13d2aa2251acdd8adbae8d3edd43d.diff
LOG: [LoongArch][test] Add floating-point atomic load/store tests. NFC
Added:
Modified:
llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
index 1af2b38d799436..9ef74e4960ce7c 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
@@ -88,6 +88,50 @@ define ptr @load_acquire_ptr(ptr %ptr) {
ret ptr %val
}
+define float @load_acquire_float(ptr %ptr) {
+; LA32-LABEL: load_acquire_float:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a0, $a0, 0
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: dbar 20
+; LA32-NEXT: ret
+;
+; LA64-LABEL: load_acquire_float:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.w $a0, $a0, 0
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: dbar 20
+; LA64-NEXT: ret
+ %val = load atomic float, ptr %ptr acquire, align 8
+ ret float %val
+}
+
+define double @load_acquire_double(ptr %ptr) {
+; LA32-LABEL: load_acquire_double:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: .cfi_def_cfa_offset 16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: .cfi_offset 1, -4
+; LA32-NEXT: ori $a1, $zero, 2
+; LA32-NEXT: bl %plt(__atomic_load_8)
+; LA32-NEXT: st.w $a1, $sp, 4
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: fld.d $fa0, $sp, 0
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: load_acquire_double:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.d $a0, $a0, 0
+; LA64-NEXT: movgr2fr.d $fa0, $a0
+; LA64-NEXT: dbar 20
+; LA64-NEXT: ret
+ %val = load atomic double, ptr %ptr acquire, align 8
+ ret double %val
+}
+
define i8 @load_unordered_i8(ptr %ptr) {
; LA32-LABEL: load_unordered_i8:
; LA32: # %bb.0:
@@ -165,6 +209,47 @@ define ptr @load_unordered_ptr(ptr %ptr) {
ret ptr %val
}
+define float @load_unordered_float(ptr %ptr) {
+; LA32-LABEL: load_unordered_float:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a0, $a0, 0
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: load_unordered_float:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.w $a0, $a0, 0
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: ret
+ %val = load atomic float, ptr %ptr unordered, align 8
+ ret float %val
+}
+
+define double @load_unordered_double(ptr %ptr) {
+; LA32-LABEL: load_unordered_double:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: .cfi_def_cfa_offset 16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: .cfi_offset 1, -4
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: bl %plt(__atomic_load_8)
+; LA32-NEXT: st.w $a1, $sp, 4
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: fld.d $fa0, $sp, 0
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: load_unordered_double:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.d $a0, $a0, 0
+; LA64-NEXT: movgr2fr.d $fa0, $a0
+; LA64-NEXT: ret
+ %val = load atomic double, ptr %ptr unordered, align 8
+ ret double %val
+}
+
define i8 @load_monotonic_i8(ptr %ptr) {
; LA32-LABEL: load_monotonic_i8:
; LA32: # %bb.0:
@@ -242,6 +327,47 @@ define ptr @load_monotonic_ptr(ptr %ptr) {
ret ptr %val
}
+define float @load_monotonic_float(ptr %ptr) {
+; LA32-LABEL: load_monotonic_float:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a0, $a0, 0
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: load_monotonic_float:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.w $a0, $a0, 0
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: ret
+ %val = load atomic float, ptr %ptr monotonic, align 8
+ ret float %val
+}
+
+define double @load_monotonic_double(ptr %ptr) {
+; LA32-LABEL: load_monotonic_double:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: .cfi_def_cfa_offset 16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: .cfi_offset 1, -4
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: bl %plt(__atomic_load_8)
+; LA32-NEXT: st.w $a1, $sp, 4
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: fld.d $fa0, $sp, 0
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: load_monotonic_double:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.d $a0, $a0, 0
+; LA64-NEXT: movgr2fr.d $fa0, $a0
+; LA64-NEXT: ret
+ %val = load atomic double, ptr %ptr monotonic, align 8
+ ret double %val
+}
+
define i8 @load_seq_cst_i8(ptr %ptr) {
; LA32-LABEL: load_seq_cst_i8:
; LA32: # %bb.0:
@@ -328,6 +454,50 @@ define ptr @load_seq_cst_ptr(ptr %ptr) {
ret ptr %val
}
+define float @load_seq_cst_float(ptr %ptr) {
+; LA32-LABEL: load_seq_cst_float:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a0, $a0, 0
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: dbar 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: load_seq_cst_float:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.w $a0, $a0, 0
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: dbar 16
+; LA64-NEXT: ret
+ %val = load atomic float, ptr %ptr seq_cst, align 8
+ ret float %val
+}
+
+define double @load_seq_cst_double(ptr %ptr) {
+; LA32-LABEL: load_seq_cst_double:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: .cfi_def_cfa_offset 16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: .cfi_offset 1, -4
+; LA32-NEXT: ori $a1, $zero, 5
+; LA32-NEXT: bl %plt(__atomic_load_8)
+; LA32-NEXT: st.w $a1, $sp, 4
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: fld.d $fa0, $sp, 0
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: load_seq_cst_double:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.d $a0, $a0, 0
+; LA64-NEXT: movgr2fr.d $fa0, $a0
+; LA64-NEXT: dbar 16
+; LA64-NEXT: ret
+ %val = load atomic double, ptr %ptr seq_cst, align 8
+ ret double %val
+}
+
define void @store_release_i8(ptr %ptr, i8 signext %v) {
; LA32-LABEL: store_release_i8:
; LA32: # %bb.0:
@@ -411,6 +581,48 @@ define void @store_release_ptr(ptr %ptr, ptr %v) {
ret void
}
+define void @store_release_float(ptr %ptr, float %v) {
+; LA32-LABEL: store_release_float:
+; LA32: # %bb.0:
+; LA32-NEXT: movfr2gr.s $a1, $fa0
+; LA32-NEXT: dbar 18
+; LA32-NEXT: st.w $a1, $a0, 0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: store_release_float:
+; LA64: # %bb.0:
+; LA64-NEXT: movfr2gr.s $a1, $fa0
+; LA64-NEXT: amswap_db.w $zero, $a1, $a0
+; LA64-NEXT: ret
+ store atomic float %v, ptr %ptr release, align 8
+ ret void
+}
+
+define void @store_release_double(ptr %ptr, double %v) {
+; LA32-LABEL: store_release_double:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: .cfi_def_cfa_offset 16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: .cfi_offset 1, -4
+; LA32-NEXT: fst.d $fa0, $sp, 0
+; LA32-NEXT: ld.w $a1, $sp, 0
+; LA32-NEXT: ld.w $a2, $sp, 4
+; LA32-NEXT: ori $a3, $zero, 3
+; LA32-NEXT: bl %plt(__atomic_store_8)
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: store_release_double:
+; LA64: # %bb.0:
+; LA64-NEXT: movfr2gr.d $a1, $fa0
+; LA64-NEXT: amswap_db.d $zero, $a1, $a0
+; LA64-NEXT: ret
+ store atomic double %v, ptr %ptr release, align 8
+ ret void
+}
+
define void @store_unordered_i8(ptr %ptr, i8 signext %v) {
; LA32-LABEL: store_unordered_i8:
; LA32: # %bb.0:
@@ -488,6 +700,47 @@ define void @store_unordered_ptr(ptr %ptr, ptr %v) {
ret void
}
+define void @store_unordered_float(ptr %ptr, float %v) {
+; LA32-LABEL: store_unordered_float:
+; LA32: # %bb.0:
+; LA32-NEXT: movfr2gr.s $a1, $fa0
+; LA32-NEXT: st.w $a1, $a0, 0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: store_unordered_float:
+; LA64: # %bb.0:
+; LA64-NEXT: movfr2gr.s $a1, $fa0
+; LA64-NEXT: st.w $a1, $a0, 0
+; LA64-NEXT: ret
+ store atomic float %v, ptr %ptr unordered, align 8
+ ret void
+}
+
+define void @store_unordered_double(ptr %ptr, double %v) {
+; LA32-LABEL: store_unordered_double:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: .cfi_def_cfa_offset 16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: .cfi_offset 1, -4
+; LA32-NEXT: fst.d $fa0, $sp, 0
+; LA32-NEXT: ld.w $a1, $sp, 0
+; LA32-NEXT: ld.w $a2, $sp, 4
+; LA32-NEXT: move $a3, $zero
+; LA32-NEXT: bl %plt(__atomic_store_8)
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: store_unordered_double:
+; LA64: # %bb.0:
+; LA64-NEXT: movfr2gr.d $a1, $fa0
+; LA64-NEXT: st.d $a1, $a0, 0
+; LA64-NEXT: ret
+ store atomic double %v, ptr %ptr unordered, align 8
+ ret void
+}
+
define void @store_monotonic_i8(ptr %ptr, i8 signext %v) {
; LA32-LABEL: store_monotonic_i8:
; LA32: # %bb.0:
@@ -565,6 +818,47 @@ define void @store_monotonic_ptr(ptr %ptr, ptr %v) {
ret void
}
+define void @store_monotonic_float(ptr %ptr, float %v) {
+; LA32-LABEL: store_monotonic_float:
+; LA32: # %bb.0:
+; LA32-NEXT: movfr2gr.s $a1, $fa0
+; LA32-NEXT: st.w $a1, $a0, 0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: store_monotonic_float:
+; LA64: # %bb.0:
+; LA64-NEXT: movfr2gr.s $a1, $fa0
+; LA64-NEXT: st.w $a1, $a0, 0
+; LA64-NEXT: ret
+ store atomic float %v, ptr %ptr monotonic, align 8
+ ret void
+}
+
+define void @store_monotonic_double(ptr %ptr, double %v) {
+; LA32-LABEL: store_monotonic_double:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: .cfi_def_cfa_offset 16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: .cfi_offset 1, -4
+; LA32-NEXT: fst.d $fa0, $sp, 0
+; LA32-NEXT: ld.w $a1, $sp, 0
+; LA32-NEXT: ld.w $a2, $sp, 4
+; LA32-NEXT: move $a3, $zero
+; LA32-NEXT: bl %plt(__atomic_store_8)
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: store_monotonic_double:
+; LA64: # %bb.0:
+; LA64-NEXT: movfr2gr.d $a1, $fa0
+; LA64-NEXT: st.d $a1, $a0, 0
+; LA64-NEXT: ret
+ store atomic double %v, ptr %ptr monotonic, align 8
+ ret void
+}
+
define void @store_seq_cst_i8(ptr %ptr, i8 signext %v) {
; LA32-LABEL: store_seq_cst_i8:
; LA32: # %bb.0:
@@ -653,3 +947,46 @@ define void @store_seq_cst_ptr(ptr %ptr, ptr %v) {
store atomic ptr %v, ptr %ptr seq_cst, align 8
ret void
}
+
+define void @store_seq_cst_float(ptr %ptr, float %v) {
+; LA32-LABEL: store_seq_cst_float:
+; LA32: # %bb.0:
+; LA32-NEXT: movfr2gr.s $a1, $fa0
+; LA32-NEXT: dbar 16
+; LA32-NEXT: st.w $a1, $a0, 0
+; LA32-NEXT: dbar 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: store_seq_cst_float:
+; LA64: # %bb.0:
+; LA64-NEXT: movfr2gr.s $a1, $fa0
+; LA64-NEXT: amswap_db.w $zero, $a1, $a0
+; LA64-NEXT: ret
+ store atomic float %v, ptr %ptr seq_cst, align 8
+ ret void
+}
+
+define void @store_seq_cst_double(ptr %ptr, double %v) {
+; LA32-LABEL: store_seq_cst_double:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: .cfi_def_cfa_offset 16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: .cfi_offset 1, -4
+; LA32-NEXT: fst.d $fa0, $sp, 0
+; LA32-NEXT: ld.w $a1, $sp, 0
+; LA32-NEXT: ld.w $a2, $sp, 4
+; LA32-NEXT: ori $a3, $zero, 5
+; LA32-NEXT: bl %plt(__atomic_store_8)
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: store_seq_cst_double:
+; LA64: # %bb.0:
+; LA64-NEXT: movfr2gr.d $a1, $fa0
+; LA64-NEXT: amswap_db.d $zero, $a1, $a0
+; LA64-NEXT: ret
+ store atomic double %v, ptr %ptr seq_cst, align 8
+ ret void
+}
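For readers less familiar with these tests: the LA32/LA64 CHECK lines above are normally machine-generated rather than written by hand. A minimal sketch of the usual workflow follows, assuming RUN lines of the customary shape; the actual RUN lines and attribute flags sit outside the diff hunks shown here, so treat them as an assumption.

; Assumed RUN lines -- the exact triples/attributes are not part of the hunks above.
; RUN: llc --mtriple=loongarch32 -mattr=+d < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 -mattr=+d < %s | FileCheck %s --check-prefix=LA64

After appending the new float/double IR functions, the CHECK bodies are regenerated with LLVM's updater script, e.g.:

$ llvm/utils/update_llc_test_checks.py \
    llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll

This keeps the assembly expectations (ld.w/movgr2fr.w moves, the dbar hints, amswap_db.* stores, and the __atomic_load_8/__atomic_store_8 libcalls on LA32) in sync with the current LoongArch backend output.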