[llvm] [NFC][AArch64] move AArch64 non auto-generated tests to static file (PR #126312)

Jonathan Thackray via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 7 13:32:20 PST 2025


https://github.com/jthackray created https://github.com/llvm/llvm-project/pull/126312

Move the non-auto-generated AArch64 test code into a static file, since the script ./llvm/test/CodeGen/AArch64/Atomics/generate-tests.py overwrites these tests when re-run. (The test code was originally added in commit 465bc5e729fd755880b9a288de42a37ad1206301.)
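
For reference, a minimal sketch of how the relocated tests can still be exercised locally, based on the RUN line in the new file. The tool and build-directory paths below are assumptions and may differ in your setup:

    # Mirrors the RUN line in llvm/test/CodeGen/AArch64/v8.4-atomic.ll;
    # assumes llc and FileCheck from your build are on PATH and the command
    # is run from the repository root.
    llc -mtriple=aarch64-linux-gnu -mattr=+lse2 \
        llvm/test/CodeGen/AArch64/v8.4-atomic.ll -o - \
      | FileCheck llvm/test/CodeGen/AArch64/v8.4-atomic.ll

    # Alternatively, run it through lit (the build directory name here is
    # an assumption):
    ./build/bin/llvm-lit -v llvm/test/CodeGen/AArch64/v8.4-atomic.ll

Because the new file lives outside llvm/test/CodeGen/AArch64/Atomics/, re-running generate-tests.py no longer clobbers these hand-written checks.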

From e88a786b1a08769d22d0f696be163ae552f152da Mon Sep 17 00:00:00 2001
From: Jonathan Thackray <jonathan.thackray at arm.com>
Date: Fri, 7 Feb 2025 21:23:50 +0000
Subject: [PATCH] [NFC][AArch64] move AArch64 non auto-generated tests to
 static file

Move AArch64 non auto-generated test code into a static file, since the
script ./llvm/test/CodeGen/AArch64/Atomics/generate-tests.py will overwrite
these tests when re-run. (Test code was originally added in change
465bc5e729fd755880b9a288de42a37ad1206301)
---
 .../Atomics/aarch64-atomic-load-lse2.ll       | 112 -----------------
 llvm/test/CodeGen/AArch64/v8.4-atomic.ll      | 113 ++++++++++++++++++
 2 files changed, 113 insertions(+), 112 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/v8.4-atomic.ll

diff --git a/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-load-lse2.ll b/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-load-lse2.ll
index 3732d4feb0c67bb..0e9c29758244a33 100644
--- a/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-load-lse2.ll
+++ b/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-load-lse2.ll
@@ -567,118 +567,6 @@ define dso_local i128 @load_atomic_i128_unaligned_seq_cst_const(ptr readonly %pt
     ret i128 %r
 }
 
-define dso_local half @load_atomic_f16_aligned_unordered(ptr %ptr) {
-; CHECK-LABEL: load_atomic_f16_aligned_unordered:
-; CHECK:    ldrh w8, [x0]
-    %r = load atomic half, ptr %ptr unordered, align 2
-    ret half %r
-}
-
-define dso_local half @load_atomic_f16_aligned_unordered_const(ptr readonly %ptr) {
-; CHECK-LABEL: load_atomic_f16_aligned_unordered_const:
-; CHECK:    ldrh w8, [x0]
-    %r = load atomic half, ptr %ptr unordered, align 2
-    ret half %r
-}
-
-define dso_local half @load_atomic_f16_aligned_monotonic(ptr %ptr) {
-; CHECK-LABEL: load_atomic_f16_aligned_monotonic:
-; CHECK:    ldrh w8, [x0]
-    %r = load atomic half, ptr %ptr monotonic, align 2
-    ret half %r
-}
-
-define dso_local half @load_atomic_f16_aligned_monotonic_const(ptr readonly %ptr) {
-; CHECK-LABEL: load_atomic_f16_aligned_monotonic_const:
-; CHECK:    ldrh w8, [x0]
-    %r = load atomic half, ptr %ptr monotonic, align 2
-    ret half %r
-}
-
-define dso_local half @load_atomic_f16_aligned_acquire(ptr %ptr) {
-; CHECK-LABEL: load_atomic_f16_aligned_acquire:
-; CHECK:    ldarh w8, [x0]
-    %r = load atomic half, ptr %ptr acquire, align 2
-    ret half %r
-}
-
-define dso_local half @load_atomic_f16_aligned_acquire_const(ptr readonly %ptr) {
-; CHECK-LABEL: load_atomic_f16_aligned_acquire_const:
-; CHECK:    ldarh w8, [x0]
-    %r = load atomic half, ptr %ptr acquire, align 2
-    ret half %r
-}
-
-define dso_local half @load_atomic_f16_aligned_seq_cst(ptr %ptr) {
-; CHECK-LABEL: load_atomic_f16_aligned_seq_cst:
-; CHECK:    ldarh w8, [x0]
-    %r = load atomic half, ptr %ptr seq_cst, align 2
-    ret half %r
-}
-
-define dso_local half @load_atomic_f16_aligned_seq_cst_const(ptr readonly %ptr) {
-; CHECK-LABEL: load_atomic_f16_aligned_seq_cst_const:
-; CHECK:    ldarh w8, [x0]
-    %r = load atomic half, ptr %ptr seq_cst, align 2
-    ret half %r
-}
-
-define dso_local bfloat @load_atomic_bf16_aligned_unordered(ptr %ptr) {
-; CHECK-LABEL: load_atomic_bf16_aligned_unordered:
-; CHECK:    ldrh w8, [x0]
-    %r = load atomic bfloat, ptr %ptr unordered, align 2
-    ret bfloat %r
-}
-
-define dso_local bfloat @load_atomic_bf16_aligned_unordered_const(ptr readonly %ptr) {
-; CHECK-LABEL: load_atomic_bf16_aligned_unordered_const:
-; CHECK:    ldrh w8, [x0]
-    %r = load atomic bfloat, ptr %ptr unordered, align 2
-    ret bfloat %r
-}
-
-define dso_local bfloat @load_atomic_bf16_aligned_monotonic(ptr %ptr) {
-; CHECK-LABEL: load_atomic_bf16_aligned_monotonic:
-; CHECK:    ldrh w8, [x0]
-    %r = load atomic bfloat, ptr %ptr monotonic, align 2
-    ret bfloat %r
-}
-
-define dso_local bfloat @load_atomic_bf16_aligned_monotonic_const(ptr readonly %ptr) {
-; CHECK-LABEL: load_atomic_bf16_aligned_monotonic_const:
-; CHECK:    ldrh w8, [x0]
-    %r = load atomic bfloat, ptr %ptr monotonic, align 2
-    ret bfloat %r
-}
-
-define dso_local bfloat @load_atomic_bf16_aligned_acquire(ptr %ptr) {
-; CHECK-LABEL: load_atomic_bf16_aligned_acquire:
-; CHECK:    ldarh w8, [x0]
-    %r = load atomic bfloat, ptr %ptr acquire, align 2
-    ret bfloat %r
-}
-
-define dso_local bfloat @load_atomic_bf16_aligned_acquire_const(ptr readonly %ptr) {
-; CHECK-LABEL: load_atomic_bf16_aligned_acquire_const:
-; CHECK:    ldarh w8, [x0]
-    %r = load atomic bfloat, ptr %ptr acquire, align 2
-    ret bfloat %r
-}
-
-define dso_local bfloat @load_atomic_bf16_aligned_seq_cst(ptr %ptr) {
-; CHECK-LABEL: load_atomic_bf16_aligned_seq_cst:
-; CHECK:    ldarh w8, [x0]
-    %r = load atomic bfloat, ptr %ptr seq_cst, align 2
-    ret bfloat %r
-}
-
-define dso_local bfloat @load_atomic_bf16_aligned_seq_cst_const(ptr readonly %ptr) {
-; CHECK-LABEL: load_atomic_bf16_aligned_seq_cst_const:
-; CHECK:    ldarh w8, [x0]
-    %r = load atomic bfloat, ptr %ptr seq_cst, align 2
-    ret bfloat %r
-}
-
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; -O0: {{.*}}
 ; -O1: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/v8.4-atomic.ll b/llvm/test/CodeGen/AArch64/v8.4-atomic.ll
new file mode 100644
index 000000000000000..1394b89159c41d9
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/v8.4-atomic.ll
@@ -0,0 +1,113 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+lse2 %s -o - | FileCheck %s
+
+define dso_local half @load_atomic_f16_aligned_unordered(ptr %ptr) {
+; CHECK-LABEL: load_atomic_f16_aligned_unordered:
+; CHECK:    ldrh w8, [x0]
+    %r = load atomic half, ptr %ptr unordered, align 2
+    ret half %r
+}
+
+define dso_local half @load_atomic_f16_aligned_unordered_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_f16_aligned_unordered_const:
+; CHECK:    ldrh w8, [x0]
+    %r = load atomic half, ptr %ptr unordered, align 2
+    ret half %r
+}
+
+define dso_local half @load_atomic_f16_aligned_monotonic(ptr %ptr) {
+; CHECK-LABEL: load_atomic_f16_aligned_monotonic:
+; CHECK:    ldrh w8, [x0]
+    %r = load atomic half, ptr %ptr monotonic, align 2
+    ret half %r
+}
+
+define dso_local half @load_atomic_f16_aligned_monotonic_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_f16_aligned_monotonic_const:
+; CHECK:    ldrh w8, [x0]
+    %r = load atomic half, ptr %ptr monotonic, align 2
+    ret half %r
+}
+
+define dso_local half @load_atomic_f16_aligned_acquire(ptr %ptr) {
+; CHECK-LABEL: load_atomic_f16_aligned_acquire:
+; CHECK:    ldarh w8, [x0]
+    %r = load atomic half, ptr %ptr acquire, align 2
+    ret half %r
+}
+
+define dso_local half @load_atomic_f16_aligned_acquire_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_f16_aligned_acquire_const:
+; CHECK:    ldarh w8, [x0]
+    %r = load atomic half, ptr %ptr acquire, align 2
+    ret half %r
+}
+
+define dso_local half @load_atomic_f16_aligned_seq_cst(ptr %ptr) {
+; CHECK-LABEL: load_atomic_f16_aligned_seq_cst:
+; CHECK:    ldarh w8, [x0]
+    %r = load atomic half, ptr %ptr seq_cst, align 2
+    ret half %r
+}
+
+define dso_local half @load_atomic_f16_aligned_seq_cst_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_f16_aligned_seq_cst_const:
+; CHECK:    ldarh w8, [x0]
+    %r = load atomic half, ptr %ptr seq_cst, align 2
+    ret half %r
+}
+
+define dso_local bfloat @load_atomic_bf16_aligned_unordered(ptr %ptr) {
+; CHECK-LABEL: load_atomic_bf16_aligned_unordered:
+; CHECK:    ldrh w8, [x0]
+    %r = load atomic bfloat, ptr %ptr unordered, align 2
+    ret bfloat %r
+}
+
+define dso_local bfloat @load_atomic_bf16_aligned_unordered_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_bf16_aligned_unordered_const:
+; CHECK:    ldrh w8, [x0]
+    %r = load atomic bfloat, ptr %ptr unordered, align 2
+    ret bfloat %r
+}
+
+define dso_local bfloat @load_atomic_bf16_aligned_monotonic(ptr %ptr) {
+; CHECK-LABEL: load_atomic_bf16_aligned_monotonic:
+; CHECK:    ldrh w8, [x0]
+    %r = load atomic bfloat, ptr %ptr monotonic, align 2
+    ret bfloat %r
+}
+
+define dso_local bfloat @load_atomic_bf16_aligned_monotonic_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_bf16_aligned_monotonic_const:
+; CHECK:    ldrh w8, [x0]
+    %r = load atomic bfloat, ptr %ptr monotonic, align 2
+    ret bfloat %r
+}
+
+define dso_local bfloat @load_atomic_bf16_aligned_acquire(ptr %ptr) {
+; CHECK-LABEL: load_atomic_bf16_aligned_acquire:
+; CHECK:    ldarh w8, [x0]
+    %r = load atomic bfloat, ptr %ptr acquire, align 2
+    ret bfloat %r
+}
+
+define dso_local bfloat @load_atomic_bf16_aligned_acquire_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_bf16_aligned_acquire_const:
+; CHECK:    ldarh w8, [x0]
+    %r = load atomic bfloat, ptr %ptr acquire, align 2
+    ret bfloat %r
+}
+
+define dso_local bfloat @load_atomic_bf16_aligned_seq_cst(ptr %ptr) {
+; CHECK-LABEL: load_atomic_bf16_aligned_seq_cst:
+; CHECK:    ldarh w8, [x0]
+    %r = load atomic bfloat, ptr %ptr seq_cst, align 2
+    ret bfloat %r
+}
+
+define dso_local bfloat @load_atomic_bf16_aligned_seq_cst_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_bf16_aligned_seq_cst_const:
+; CHECK:    ldarh w8, [x0]
+    %r = load atomic bfloat, ptr %ptr seq_cst, align 2
+    ret bfloat %r
+}


