[PATCH] D54291: [XRay] Add atomic fences around non-atomic reads and writes

Dean Michael Berris via Phabricator via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 8 22:42:14 PST 2018


This revision was automatically updated to reflect the committed changes.
Closed by commit rL346474: [XRay] Add atomic fences around non-atomic reads and writes (authored by dberris).
Herald added a subscriber: delcypher.

Changed prior to commit:
  https://reviews.llvm.org/D54291?vs=173268&id=173272#toc

Repository:
  rL LLVM

https://reviews.llvm.org/D54291

Files:
  compiler-rt/trunk/lib/xray/xray_fdr_log_writer.h
  compiler-rt/trunk/lib/xray/xray_fdr_logging.cc


Index: compiler-rt/trunk/lib/xray/xray_fdr_log_writer.h
===================================================================
--- compiler-rt/trunk/lib/xray/xray_fdr_log_writer.h
+++ compiler-rt/trunk/lib/xray/xray_fdr_log_writer.h
@@ -63,6 +63,10 @@
   template <class T> void writeRecord(const T &R) {
     internal_memcpy(NextRecord, reinterpret_cast<const char *>(&R), sizeof(T));
     NextRecord += sizeof(T);
+    // We need this atomic fence here to ensure that other threads attempting to
+    // read the bytes in the buffer will see the writes committed before the
+    // extents are updated.
+    atomic_thread_fence(memory_order_release);
     atomic_fetch_add(&Buffer.Extents, sizeof(T), memory_order_acq_rel);
   }
 
@@ -89,6 +93,10 @@
     constexpr auto Size = sizeof(MetadataRecord) * N;
     internal_memcpy(NextRecord, reinterpret_cast<const char *>(Recs), Size);
     NextRecord += Size;
+    // We need this atomic fence here to ensure that other threads attempting to
+    // read the bytes in the buffer will see the writes committed before the
+    // extents are updated.
+    atomic_thread_fence(memory_order_release);
     atomic_fetch_add(&Buffer.Extents, Size, memory_order_acq_rel);
     return Size;
   }
@@ -129,6 +137,10 @@
     NextRecord = reinterpret_cast<char *>(internal_memcpy(
                      NextRecord, reinterpret_cast<char *>(&A), sizeof(A))) +
                  sizeof(A);
+    // We need this atomic fence here to ensure that other threads attempting to
+    // read the bytes in the buffer will see the writes committed before the
+    // extents are updated.
+    atomic_thread_fence(memory_order_release);
     atomic_fetch_add(&Buffer.Extents, sizeof(R) + sizeof(A),
                      memory_order_acq_rel);
     return true;
@@ -149,6 +161,11 @@
     NextRecord = reinterpret_cast<char *>(
                      internal_memcpy(NextRecord, Event, EventSize)) +
                  EventSize;
+
+    // We need this atomic fence here to ensure that other threads attempting to
+    // read the bytes in the buffer will see the writes committed before the
+    // extents are updated.
+    atomic_thread_fence(memory_order_release);
     atomic_fetch_add(&Buffer.Extents, sizeof(R) + EventSize,
                      memory_order_acq_rel);
     return true;
@@ -167,6 +184,11 @@
     NextRecord = reinterpret_cast<char *>(
                      internal_memcpy(NextRecord, Event, EventSize)) +
                  EventSize;
+
+    // We need this atomic fence here to ensure that other threads attempting to
+    // read the bytes in the buffer will see the writes committed before the
+    // extents are updated.
+    atomic_thread_fence(memory_order_release);
     atomic_fetch_add(&Buffer.Extents, EventSize, memory_order_acq_rel);
     return true;
   }
Index: compiler-rt/trunk/lib/xray/xray_fdr_logging.cc
===================================================================
--- compiler-rt/trunk/lib/xray/xray_fdr_logging.cc
+++ compiler-rt/trunk/lib/xray/xray_fdr_logging.cc
@@ -243,6 +243,13 @@
   // out to disk. The difference here would be that we still write "empty"
   // buffers, or at least go through the iterators faithfully to let the
   // handlers see the empty buffers in the queue.
+  //
+  // We need this atomic fence here to ensure that writes happening to the
+  // buffer have been committed before we load the extents atomically. Because
+  // the buffer is not explicitly synchronised across threads, we rely on the
+  // fence ordering to ensure that writes we expect to have been completed
+  // before the fence are fully committed before we read the extents.
+  atomic_thread_fence(memory_order_acquire);
   auto BufferSize = atomic_load(&It->Extents, memory_order_acquire);
   SerializedBufferSize = BufferSize + sizeof(MetadataRecord);
   CurrentBuffer = allocateBuffer(SerializedBufferSize);


-------------- next part --------------
A non-text attachment was scrubbed...
Name: D54291.173272.patch
Type: text/x-patch
Size: 3884 bytes
Desc: not available
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20181109/0ff36e70/attachment.bin>


More information about the llvm-commits mailing list