[clang] [llvm] [BPF] Do atomic_fetch_*() pattern matching with memory ordering (PR #107343)

via llvm-commits llvm-commits@lists.llvm.org
Tue Sep 24 10:33:34 PDT 2024


================
@@ -0,0 +1,385 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -march=bpfel -mcpu=v1 -filetype=asm < %s | FileCheck %s
+;
+; Source:
+; $ cat atomics_mem_order_v1.c
+;   #include <stdatomic.h>
+;
+;   void test_fetch_add_32_noret(int _Atomic *i) {
+;     (void)__c11_atomic_fetch_add(i, 10, memory_order_relaxed);
+;     (void)__c11_atomic_fetch_add(i, 10, memory_order_acquire);
+;     (void)__c11_atomic_fetch_add(i, 10, memory_order_release);
+;     (void)__c11_atomic_fetch_add(i, 10, memory_order_acq_rel);
+;     (void)__c11_atomic_fetch_add(i, 10, memory_order_seq_cst);
+;   }
+;
+;   void test_fetch_add_64_noret(long _Atomic *i) {
+;     (void)__c11_atomic_fetch_add(i, 10, memory_order_relaxed);
+;     (void)__c11_atomic_fetch_add(i, 10, memory_order_acquire);
+;     (void)__c11_atomic_fetch_add(i, 10, memory_order_release);
+;     (void)__c11_atomic_fetch_add(i, 10, memory_order_acq_rel);
+;     (void)__c11_atomic_fetch_add(i, 10, memory_order_seq_cst);
+;   }
+;
+;   void test_fetch_sub_64_noret(long _Atomic *i) {
+;     (void)__c11_atomic_fetch_sub(i, 10, memory_order_relaxed);
+;     (void)__c11_atomic_fetch_sub(i, 10, memory_order_acquire);
+;     (void)__c11_atomic_fetch_sub(i, 10, memory_order_release);
+;     (void)__c11_atomic_fetch_sub(i, 10, memory_order_acq_rel);
+;     (void)__c11_atomic_fetch_sub(i, 10, memory_order_seq_cst);
+;   }
+;
+;   long test_fetch_sub_64_ret(long _Atomic *i) {
+;      return __c11_atomic_fetch_sub(i, 10, memory_order_acquire) +
+;             __c11_atomic_fetch_sub(i, 10, memory_order_release) +
+;             __c11_atomic_fetch_sub(i, 10, memory_order_acq_rel) +
+;             __c11_atomic_fetch_sub(i, 10, memory_order_seq_cst);
+;   }
+;
+;   void test_fetch_and_64_noret(long _Atomic *i) {
+;     (void)__c11_atomic_fetch_and(i, 10, memory_order_relaxed);
+;     (void)__c11_atomic_fetch_and(i, 10, memory_order_acquire);
+;     (void)__c11_atomic_fetch_and(i, 10, memory_order_release);
+;     (void)__c11_atomic_fetch_and(i, 10, memory_order_acq_rel);
+;     (void)__c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
+;   }
+;
+;   long test_fetch_and_64_ret(long _Atomic *i) {
+;     return __c11_atomic_fetch_and(i, 10, memory_order_relaxed) +
+;            __c11_atomic_fetch_and(i, 10, memory_order_acquire) +
+;            __c11_atomic_fetch_and(i, 10, memory_order_release) +
+;            __c11_atomic_fetch_and(i, 10, memory_order_acq_rel) +
+;            __c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
+;   }
+;
+;   void test_fetch_or_64_noret(long _Atomic *i) {
+;     (void)__c11_atomic_fetch_or(i, 10, memory_order_relaxed);
+;     (void)__c11_atomic_fetch_or(i, 10, memory_order_acquire);
+;     (void)__c11_atomic_fetch_or(i, 10, memory_order_release);
+;     (void)__c11_atomic_fetch_or(i, 10, memory_order_acq_rel);
+;     (void)__c11_atomic_fetch_or(i, 10, memory_order_seq_cst);
+;   }
+;
+;   long test_fetch_or_64_ret(long _Atomic *i) {
+;     return __c11_atomic_fetch_or(i, 10, memory_order_relaxed) +
+;            __c11_atomic_fetch_or(i, 10, memory_order_acquire) +
+;            __c11_atomic_fetch_or(i, 10, memory_order_release) +
+;            __c11_atomic_fetch_or(i, 10, memory_order_acq_rel) +
+;            __c11_atomic_fetch_or(i, 10, memory_order_seq_cst);
+;   }
+;
+;   void test_fetch_xor_64_noret(long _Atomic *i) {
+;     (void)__c11_atomic_fetch_xor(i, 10, memory_order_relaxed);
+;     (void)__c11_atomic_fetch_xor(i, 10, memory_order_acquire);
+;     (void)__c11_atomic_fetch_xor(i, 10, memory_order_release);
+;     (void)__c11_atomic_fetch_xor(i, 10, memory_order_acq_rel);
+;     (void)__c11_atomic_fetch_xor(i, 10, memory_order_seq_cst);
+;   }
+;
+;   long test_fetch_xor_64_ret(long _Atomic *i) {
+;     return __c11_atomic_fetch_xor(i, 10, memory_order_relaxed) +
+;            __c11_atomic_fetch_xor(i, 10, memory_order_acquire) +
+;            __c11_atomic_fetch_xor(i, 10, memory_order_release) +
+;            __c11_atomic_fetch_xor(i, 10, memory_order_acq_rel) +
+;            __c11_atomic_fetch_xor(i, 10, memory_order_seq_cst);
+;   }
+
+target triple = "bpf"
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_add_32_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_add_32_noret:
+; CHECK:       .Ltest_fetch_add_32_noret$local:
+; CHECK-NEXT:    .type .Ltest_fetch_add_32_noret$local,@function
+; CHECK-NEXT:  # %bb.0: # %entry
+; CHECK-NEXT:    r2 = 10
+; CHECK-NEXT:    r3 = 10
+; CHECK-NEXT:    lock *(u32 *)(r1 + 0) += r3
+; CHECK-NEXT:    r3 = 10
+; CHECK-NEXT:    lock *(u32 *)(r1 + 0) += r3
+; CHECK-NEXT:    r3 = 10
+; CHECK-NEXT:    lock *(u32 *)(r1 + 0) += r3
+; CHECK-NEXT:    r3 = 10
+; CHECK-NEXT:    lock *(u32 *)(r1 + 0) += r3
+; CHECK-NEXT:    lock *(u32 *)(r1 + 0) += r2
+; CHECK-NEXT:    exit
+entry:
+  %0 = atomicrmw add ptr %i, i32 10 monotonic, align 4
+  %1 = atomicrmw add ptr %i, i32 10 acquire, align 4
+  %2 = atomicrmw add ptr %i, i32 10 release, align 4
+  %3 = atomicrmw add ptr %i, i32 10 acq_rel, align 4
+  %4 = atomicrmw add ptr %i, i32 10 seq_cst, align 4
+  ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_add_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
----------------
eddyz87 wrote:

Oh, ok.

https://github.com/llvm/llvm-project/pull/107343


More information about the llvm-commits mailing list