[compiler-rt] [tsan] Clang format a few files (PR #114725)

Vitaly Buka via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 3 16:33:07 PST 2024


https://github.com/vitalybuka updated https://github.com/llvm/llvm-project/pull/114725

From a0e4dce3cfd528b6d58cf9935facdf32d727ec65 Mon Sep 17 00:00:00 2001
From: Vitaly Buka <vitalybuka at google.com>
Date: Sun, 3 Nov 2024 16:29:35 -0800
Subject: [PATCH] [𝘀𝗽𝗿] initial version
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Created using spr 1.3.4
---
 .../lib/tsan/rtl/tsan_interceptors_mac.cpp    | 210 +++++++--------
 compiler-rt/lib/tsan/rtl/tsan_interface.h     |  15 +-
 .../lib/tsan/rtl/tsan_interface_atomic.cpp    | 247 ++++++++++--------
 3 files changed, 248 insertions(+), 224 deletions(-)
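
Most of the churn in this patch comes from clang-format's treatment of
preprocessor directives nested inside #if blocks. A minimal before/after
sketch of that one rule (my reading of the diff; the responsible option is
presumably IndentPPDirectives: AfterHash in the inherited LLVM style):

// Before: every directive starts flush against the hash.
#if SANITIZER_APPLE
#include <errno.h>
#if !SANITIZER_IOS
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
#endif
#endif

// After: the hash stays in column zero and the directive name gains two
// spaces of indentation per level of #if nesting.
#if SANITIZER_APPLE
#  include <errno.h>
#  if !SANITIZER_IOS
#    define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
#  endif
#endif

The rest is include reordering, template headers broken onto their own
lines, and argument reflowing, all behavior-preserving.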

diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
index 9db0eebd923696..e0e4c5b9d36cd3 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
@@ -14,22 +14,22 @@
 #include "sanitizer_common/sanitizer_platform.h"
 #if SANITIZER_APPLE
 
-#include "interception/interception.h"
-#include "tsan_interceptors.h"
-#include "tsan_interface.h"
-#include "tsan_interface_ann.h"
-#include "tsan_spinlock_defs_mac.h"
-#include "sanitizer_common/sanitizer_addrhashmap.h"
-
-#include <errno.h>
-#include <libkern/OSAtomic.h>
-#include <objc/objc-sync.h>
-#include <os/lock.h>
-#include <sys/ucontext.h>
-
-#if defined(__has_include) && __has_include(<xpc/xpc.h>)
-#include <xpc/xpc.h>
-#endif  // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
+#  include <errno.h>
+#  include <libkern/OSAtomic.h>
+#  include <objc/objc-sync.h>
+#  include <os/lock.h>
+#  include <sys/ucontext.h>
+
+#  include "interception/interception.h"
+#  include "sanitizer_common/sanitizer_addrhashmap.h"
+#  include "tsan_interceptors.h"
+#  include "tsan_interface.h"
+#  include "tsan_interface_ann.h"
+#  include "tsan_spinlock_defs_mac.h"
+
+#  if defined(__has_include) && __has_include(<xpc/xpc.h>)
+#    include <xpc/xpc.h>
+#  endif  // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
 
 typedef long long_t;
 
@@ -49,55 +49,56 @@ static constexpr morder kMacOrderBarrier = mo_acq_rel;
 static constexpr morder kMacOrderNonBarrier = mo_acq_rel;
 static constexpr morder kMacFailureOrder = mo_relaxed;
 
-#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
-  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                 \
-    SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                 \
-    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo);                \
-  }
+#  define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+    TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                 \
+      SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                 \
+      return tsan_atomic_f((volatile tsan_t *)ptr, x, mo);                \
+    }
 
-#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
-  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                        \
-    SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                        \
-    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x;                   \
-  }
+#  define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, \
+                                      mo)                                    \
+    TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                    \
+      SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                    \
+      return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x;               \
+    }
 
-#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
-  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                             \
-    SCOPED_TSAN_INTERCEPTOR(f, ptr);                                           \
-    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1;                   \
-  }
+#  define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+                                      mo)                                    \
+    TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                         \
+      SCOPED_TSAN_INTERCEPTOR(f, ptr);                                       \
+      return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1;               \
+    }
 
-#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
-                                     mo)                                    \
-  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                          \
-    SCOPED_TSAN_INTERCEPTOR(f, ptr);                                        \
-    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1;                \
-  }
+#  define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+                                       mo)                                    \
+    TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                          \
+      SCOPED_TSAN_INTERCEPTOR(f, ptr);                                        \
+      return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1;                \
+    }
 
-#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m)                  \
-  m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,             \
-    kMacOrderNonBarrier)                                                       \
-  m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f,    \
-    kMacOrderBarrier)                                                          \
-  m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f,             \
-    kMacOrderNonBarrier)                                                       \
-  m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f,    \
-    kMacOrderBarrier)
-
-#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig)             \
-  m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,            \
-    kMacOrderNonBarrier)                                                       \
-  m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f,   \
-    kMacOrderBarrier)                                                          \
-  m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
-    kMacOrderNonBarrier)                                                       \
-  m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier,                           \
-    __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
-
-
-#pragma clang diagnostic push
+#  define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m)              \
+    m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,         \
+      kMacOrderNonBarrier)                                                   \
+        m(int32_t, int32_t, a32, f##32##Barrier,                             \
+          __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)                 \
+            m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \
+              kMacOrderNonBarrier)                                           \
+                m(int64_t, int64_t, a64, f##64##Barrier,                     \
+                  __tsan_atomic64_##tsan_atomic_f, kMacOrderBarrier)
+
+#  define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig)     \
+    m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,    \
+      kMacOrderNonBarrier)                                               \
+        m(int32_t, uint32_t, a32, f##32##Barrier,                        \
+          __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)             \
+            m_orig(int32_t, uint32_t, a32, f##32##Orig,                  \
+                   __tsan_atomic32_##tsan_atomic_f, kMacOrderNonBarrier) \
+                m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier,       \
+                       __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
+
+#  pragma clang diagnostic push
 // OSAtomic* functions are deprecated.
-#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#  pragma clang diagnostic ignored "-Wdeprecated-declarations"
 OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
                                  OSATOMIC_INTERCEPTOR_PLUS_X)
 OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add,
@@ -111,25 +112,25 @@ OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and,
 OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
                               OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
 
-#define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t)              \
-  TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) {    \
-    SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr);                  \
-    return tsan_atomic_f##_compare_exchange_strong(                         \
-        (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value,    \
-        kMacOrderNonBarrier, kMacFailureOrder);                             \
-  }                                                                         \
-                                                                            \
-  TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value,              \
-                   t volatile *ptr) {                                       \
-    SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr);         \
-    return tsan_atomic_f##_compare_exchange_strong(                         \
-        (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value,    \
-        kMacOrderBarrier, kMacFailureOrder);                                \
-  }
+#  define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t)           \
+    TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \
+      SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr);               \
+      return tsan_atomic_f##_compare_exchange_strong(                      \
+          (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+          kMacOrderNonBarrier, kMacFailureOrder);                          \
+    }                                                                      \
+                                                                           \
+    TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value,           \
+                     t volatile *ptr) {                                    \
+      SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr);      \
+      return tsan_atomic_f##_compare_exchange_strong(                      \
+          (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+          kMacOrderBarrier, kMacFailureOrder);                             \
+    }
 
-#pragma clang diagnostic push
+#  pragma clang diagnostic push
 // OSAtomicCompareAndSwap* functions are deprecated.
-#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#  pragma clang diagnostic ignored "-Wdeprecated-declarations"
 OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
 OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64,
                           long_t)
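
To make the reflowed macros above easier to follow, here is what the first
instantiation, OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt,
__tsan_atomic32, a32, int), expands to by hand (approximate preprocessor
output; the Barrier twin differs only in name and memory order):

TSAN_INTERCEPTOR(bool, OSAtomicCompareAndSwapInt, int old_value, int new_value,
                 int volatile *ptr) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicCompareAndSwapInt, old_value, new_value, ptr);
  // Forward the deprecated OSAtomic CAS to the tsan atomic runtime.
  return __tsan_atomic32_compare_exchange_strong(
      (volatile a32 *)ptr, (a32 *)&old_value, (a32)new_value,
      kMacOrderNonBarrier, kMacFailureOrder);
}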
@@ -139,21 +140,21 @@ OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
                           int32_t)
 OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
                           int64_t)
-#pragma clang diagnostic pop
-
-#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo)             \
-  TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) {    \
-    SCOPED_TSAN_INTERCEPTOR(f, n, ptr);                          \
-    volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \
-    char bit = 0x80u >> (n & 7);                                 \
-    char mask = clear ? ~bit : bit;                              \
-    char orig_byte = op((volatile a8 *)byte_ptr, mask, mo);      \
-    return orig_byte & bit;                                      \
-  }
+#  pragma clang diagnostic pop
+
+#  define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo)             \
+    TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) {    \
+      SCOPED_TSAN_INTERCEPTOR(f, n, ptr);                          \
+      volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \
+      char bit = 0x80u >> (n & 7);                                 \
+      char mask = clear ? ~bit : bit;                              \
+      char orig_byte = op((volatile a8 *)byte_ptr, mask, mo);      \
+      return orig_byte & bit;                                      \
+    }
 
-#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear)               \
-  OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
-  OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)
+#  define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear)               \
+    OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
+    OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)
 
 OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
 OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
@@ -169,12 +170,13 @@ TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
 TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) {
   SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset);
   void *item = REAL(OSAtomicDequeue)(list, offset);
-  if (item) __tsan_acquire(item);
+  if (item)
+    __tsan_acquire(item);
   return item;
 }
 
 // OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X.
-#if !SANITIZER_IOS
+#  if !SANITIZER_IOS
 
 TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item,
                  size_t offset) {
@@ -187,11 +189,12 @@ TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list,
                  size_t offset) {
   SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset);
   void *item = REAL(OSAtomicFifoDequeue)(list, offset);
-  if (item) __tsan_acquire(item);
+  if (item)
+    __tsan_acquire(item);
   return item;
 }
 
-#endif
+#  endif
 
 TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) {
   CHECK(!cur_thread()->is_dead);
@@ -296,7 +299,7 @@ TSAN_INTERCEPTOR(void, os_unfair_lock_unlock, os_unfair_lock_t lock) {
   REAL(os_unfair_lock_unlock)(lock);
 }
 
-#if defined(__has_include) && __has_include(<xpc/xpc.h>)
+#  if defined(__has_include) && __has_include(<xpc/xpc.h>)
 
 TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
                  xpc_connection_t connection, xpc_handler_t handler) {
@@ -350,7 +353,7 @@ TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
   REAL(xpc_connection_cancel)(connection);
 }
 
-#endif  // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
+#  endif  // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
 
 // Determines whether the Obj-C object pointer is a tagged pointer. Tagged
 // pointers encode the object data directly in their pointer bits and do not
@@ -373,7 +376,7 @@ static uptr GetOrCreateSyncAddress(uptr addr, ThreadState *thr, uptr pc) {
   Map::Handle h(&Addresses, addr);
   if (h.created()) {
     ThreadIgnoreBegin(thr, pc);
-    *h = (uptr) user_alloc(thr, pc, /*size=*/1);
+    *h = (uptr)user_alloc(thr, pc, /*size=*/1);
     ThreadIgnoreEnd(thr);
   }
   return *h;
@@ -391,7 +394,8 @@ static uptr SyncAddressForObjCObject(id obj, ThreadState *thr, uptr pc) {
 
 TSAN_INTERCEPTOR(int, objc_sync_enter, id obj) {
   SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj);
-  if (!obj) return REAL(objc_sync_enter)(obj);
+  if (!obj)
+    return REAL(objc_sync_enter)(obj);
   uptr addr = SyncAddressForObjCObject(obj, thr, pc);
   MutexPreLock(thr, pc, addr, MutexFlagWriteReentrant);
   int result = REAL(objc_sync_enter)(obj);
@@ -402,11 +406,13 @@ TSAN_INTERCEPTOR(int, objc_sync_enter, id obj) {
 
 TSAN_INTERCEPTOR(int, objc_sync_exit, id obj) {
   SCOPED_TSAN_INTERCEPTOR(objc_sync_exit, obj);
-  if (!obj) return REAL(objc_sync_exit)(obj);
+  if (!obj)
+    return REAL(objc_sync_exit)(obj);
   uptr addr = SyncAddressForObjCObject(obj, thr, pc);
   MutexUnlock(thr, pc, addr);
   int result = REAL(objc_sync_exit)(obj);
-  if (result != OBJC_SYNC_SUCCESS) MutexInvalidAccess(thr, pc, addr);
+  if (result != OBJC_SYNC_SUCCESS)
+    MutexInvalidAccess(thr, pc, addr);
   return result;
 }
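
As the tagged-pointer comment earlier in this file explains, a tagged Obj-C
pointer has no backing allocation, which is why SyncAddressForObjCObject must
synthesize a shadow address for it. A hypothetical standalone check in the
spirit of that comment (the flag-bit positions follow the usual Obj-C runtime
convention and are an assumption here, not taken from this patch):

#include <cstdint>

// Tagged pointers are marked in a reserved bit: conventionally the least
// significant bit on x86_64 and the most significant bit on arm64. Both
// the bit choices and this helper are illustrative assumptions.
static bool IsTaggedObjCPointerSketch(const void *obj) {
  uintptr_t bits = reinterpret_cast<uintptr_t>(obj);
#if defined(__x86_64__)
  return (bits & 0x1u) != 0;
#else
  return (bits & (uintptr_t(1) << 63)) != 0;
#endif
}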
 
@@ -437,7 +443,7 @@ TSAN_INTERCEPTOR(int, swapcontext, ucontext_t *oucp, const ucontext_t *ucp) {
 
 // On macOS, libc++ is always linked dynamically, so intercepting works the
 // usual way.
-#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
+#  define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
 
 namespace {
 struct fake_shared_weak_count {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface.h b/compiler-rt/lib/tsan/rtl/tsan_interface.h
index 3731c90d459152..2b8a13ddb842cc 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface.h
@@ -16,8 +16,8 @@
 #define TSAN_INTERFACE_H
 
 #include <sanitizer_common/sanitizer_internal_defs.h>
-using __sanitizer::uptr;
 using __sanitizer::tid_t;
+using __sanitizer::uptr;
 
 // This header should NOT include any other headers.
 // All functions in this header are extern "C" and start with __tsan_.
@@ -203,17 +203,18 @@ int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
 namespace __tsan {
 
 // These should match declarations from public tsan_interface_atomic.h header.
-typedef unsigned char      a8;
+typedef unsigned char a8;
 typedef unsigned short a16;
-typedef unsigned int       a32;
+typedef unsigned int a32;
 typedef unsigned long long a64;
-#if !SANITIZER_GO && (defined(__SIZEOF_INT128__) \
-    || (__clang_major__ * 100 + __clang_minor__ >= 302)) && \
+#if !SANITIZER_GO &&                                      \
+    (defined(__SIZEOF_INT128__) ||                        \
+     (__clang_major__ * 100 + __clang_minor__ >= 302)) && \
     !defined(__mips64) && !defined(__s390x__)
 __extension__ typedef __int128 a128;
-# define __TSAN_HAS_INT128 1
+#  define __TSAN_HAS_INT128 1
 #else
-# define __TSAN_HAS_INT128 0
+#  define __TSAN_HAS_INT128 0
 #endif
 
 // Part of ABI, do not change.
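
Since the typedefs above are declared ABI and must mirror the public
tsan_interface_atomic.h types, one could pin them down with compile-time
checks like the following (a hypothetical sketch; the header itself carries
no such asserts):

// Sizes expected on every platform tsan supports; a failure here would mean
// the internal typedefs diverged from the public __tsan_atomic* types.
static_assert(sizeof(a8) == 1, "a8 must be 1 byte");
static_assert(sizeof(a16) == 2, "a16 must be 2 bytes");
static_assert(sizeof(a32) == 4, "a32 must be 4 bytes");
static_assert(sizeof(a64) == 8, "a64 must be 8 bytes");
#if __TSAN_HAS_INT128
static_assert(sizeof(a128) == 16, "a128 must be 16 bytes");
#endif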
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index 2b5a2c6ef79b9e..29cfc751ea8172 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -18,9 +18,9 @@
 // The following page contains more background information:
 // http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
 
+#include "sanitizer_common/sanitizer_mutex.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
-#include "sanitizer_common/sanitizer_mutex.h"
 #include "tsan_flags.h"
 #include "tsan_interface.h"
 #include "tsan_rtl.h"
@@ -34,8 +34,8 @@ static StaticSpinMutex mutex128;
 
 #if SANITIZER_DEBUG
 static bool IsLoadOrder(morder mo) {
-  return mo == mo_relaxed || mo == mo_consume
-      || mo == mo_acquire || mo == mo_seq_cst;
+  return mo == mo_relaxed || mo == mo_consume || mo == mo_acquire ||
+         mo == mo_seq_cst;
 }
 
 static bool IsStoreOrder(morder mo) {
@@ -48,42 +48,49 @@ static bool IsReleaseOrder(morder mo) {
 }
 
 static bool IsAcquireOrder(morder mo) {
-  return mo == mo_consume || mo == mo_acquire
-      || mo == mo_acq_rel || mo == mo_seq_cst;
+  return mo == mo_consume || mo == mo_acquire || mo == mo_acq_rel ||
+         mo == mo_seq_cst;
 }
 
 static bool IsAcqRelOrder(morder mo) {
   return mo == mo_acq_rel || mo == mo_seq_cst;
 }
 
-template<typename T> T func_xchg(volatile T *v, T op) {
+template <typename T>
+T func_xchg(volatile T *v, T op) {
   T res = __sync_lock_test_and_set(v, op);
   // __sync_lock_test_and_set does not contain full barrier.
   __sync_synchronize();
   return res;
 }
 
-template<typename T> T func_add(volatile T *v, T op) {
+template <typename T>
+T func_add(volatile T *v, T op) {
   return __sync_fetch_and_add(v, op);
 }
 
-template<typename T> T func_sub(volatile T *v, T op) {
+template <typename T>
+T func_sub(volatile T *v, T op) {
   return __sync_fetch_and_sub(v, op);
 }
 
-template<typename T> T func_and(volatile T *v, T op) {
+template <typename T>
+T func_and(volatile T *v, T op) {
   return __sync_fetch_and_and(v, op);
 }
 
-template<typename T> T func_or(volatile T *v, T op) {
+template <typename T>
+T func_or(volatile T *v, T op) {
   return __sync_fetch_and_or(v, op);
 }
 
-template<typename T> T func_xor(volatile T *v, T op) {
+template <typename T>
+T func_xor(volatile T *v, T op) {
   return __sync_fetch_and_xor(v, op);
 }
 
-template<typename T> T func_nand(volatile T *v, T op) {
+template <typename T>
+T func_nand(volatile T *v, T op) {
   // clang does not support __sync_fetch_and_nand.
   T cmp = *v;
   for (;;) {
@@ -95,7 +102,8 @@ template<typename T> T func_nand(volatile T *v, T op) {
   }
 }
 
-template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
+template <typename T>
+T func_cas(volatile T *v, T cmp, T xch) {
   return __sync_val_compare_and_swap(v, cmp, xch);
 }
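
The func_nand body reformatted above is a hand-rolled emulation: clang offers
no __sync_fetch_and_nand, so the runtime loops on compare-and-swap until the
update lands. The same pattern expressed with standalone C++11 atomics, for
illustration only:

#include <atomic>
#include <cstdint>

// Emulate fetch_nand with a CAS retry loop, mirroring func_nand: read the
// current value, try to install ~(cur & op), and retry on contention.
uint32_t fetch_nand(std::atomic<uint32_t> &v, uint32_t op) {
  uint32_t cur = v.load(std::memory_order_relaxed);
  while (!v.compare_exchange_weak(cur, ~(cur & op),
                                  std::memory_order_acq_rel,
                                  std::memory_order_relaxed)) {
    // compare_exchange_weak refreshed cur with the observed value; retry.
  }
  return cur;  // previous value, as a fetch_* operation must return
}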
 
@@ -103,8 +111,8 @@ template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
 // Atomic ops are executed under tsan internal mutex,
 // here we assume that the atomic variables are not accessed
 // from non-instrumented code.
-#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
-    && __TSAN_HAS_INT128
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO && \
+    __TSAN_HAS_INT128
 a128 func_xchg(volatile a128 *v, a128 op) {
   SpinMutexLock lock(&mutex128);
   a128 cmp = *v;
@@ -197,18 +205,24 @@ static atomic_uint64_t *to_atomic(const volatile a64 *a) {
 
 static memory_order to_mo(morder mo) {
   switch (mo) {
-  case mo_relaxed: return memory_order_relaxed;
-  case mo_consume: return memory_order_consume;
-  case mo_acquire: return memory_order_acquire;
-  case mo_release: return memory_order_release;
-  case mo_acq_rel: return memory_order_acq_rel;
-  case mo_seq_cst: return memory_order_seq_cst;
+    case mo_relaxed:
+      return memory_order_relaxed;
+    case mo_consume:
+      return memory_order_consume;
+    case mo_acquire:
+      return memory_order_acquire;
+    case mo_release:
+      return memory_order_release;
+    case mo_acq_rel:
+      return memory_order_acq_rel;
+    case mo_seq_cst:
+      return memory_order_seq_cst;
   }
   DCHECK(0);
   return memory_order_seq_cst;
 }
 
-template<typename T>
+template <typename T>
 static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
   return atomic_load(to_atomic(a), to_mo(mo));
 }
@@ -246,7 +260,7 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
   return v;
 }
 
-template<typename T>
+template <typename T>
 static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
   atomic_store(to_atomic(a), v, to_mo(mo));
 }
@@ -303,91 +317,91 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
   return v;
 }
 
-template<typename T>
+template <typename T>
 static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
   return func_xchg(a, v);
 }
 
-template<typename T>
+template <typename T>
 static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
   return func_add(a, v);
 }
 
-template<typename T>
+template <typename T>
 static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
   return func_sub(a, v);
 }
 
-template<typename T>
+template <typename T>
 static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
   return func_and(a, v);
 }
 
-template<typename T>
+template <typename T>
 static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
   return func_or(a, v);
 }
 
-template<typename T>
+template <typename T>
 static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
   return func_xor(a, v);
 }
 
-template<typename T>
+template <typename T>
 static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
   return func_nand(a, v);
 }
 
-template<typename T>
+template <typename T>
 static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
-    morder mo) {
+                        morder mo) {
   return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
 }
 
-template<typename T>
+template <typename T>
 static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
-    morder mo) {
+                        morder mo) {
   return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
 }
 
-template<typename T>
+template <typename T>
 static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
-    morder mo) {
+                        morder mo) {
   return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
 }
 
-template<typename T>
+template <typename T>
 static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
-    morder mo) {
+                        morder mo) {
   return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
 }
 
-template<typename T>
+template <typename T>
 static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
-    morder mo) {
+                       morder mo) {
   return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
 }
 
-template<typename T>
+template <typename T>
 static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
-    morder mo) {
+                        morder mo) {
   return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
 }
 
-template<typename T>
+template <typename T>
 static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
-    morder mo) {
+                         morder mo) {
   return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
 }
 
-template<typename T>
+template <typename T>
 static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
   return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
 }
 
 #if __TSAN_HAS_INT128
-static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
-    morder mo, morder fmo) {
+static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v, morder mo,
+                            morder fmo) {
   a128 old = *c;
   a128 cur = func_cas(a, old, v);
   if (cur == old)
@@ -397,7 +411,7 @@ static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
 }
 #endif
 
-template<typename T>
+template <typename T>
 static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
   NoTsanAtomicCAS(a, &c, v, mo, fmo);
   return c;
@@ -445,17 +459,15 @@ static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
   return success;
 }
 
-template<typename T>
-static T AtomicCAS(ThreadState *thr, uptr pc,
-    volatile T *a, T c, T v, morder mo, morder fmo) {
+template <typename T>
+static T AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T c, T v,
+                   morder mo, morder fmo) {
   AtomicCAS(thr, pc, a, &c, v, mo, fmo);
   return c;
 }
 
 #if !SANITIZER_GO
-static void NoTsanAtomicFence(morder mo) {
-  __sync_synchronize();
-}
+static void NoTsanAtomicFence(morder mo) { __sync_synchronize(); }
 
 static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
   // FIXME(dvyukov): not implemented.
@@ -514,12 +526,12 @@ a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
   ATOMIC_IMPL(Load, a, mo);
 }
 
-#if __TSAN_HAS_INT128
+#  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
   ATOMIC_IMPL(Load, a, mo);
 }
-#endif
+#  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
@@ -541,12 +553,12 @@ void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
   ATOMIC_IMPL(Store, a, v, mo);
 }
 
-#if __TSAN_HAS_INT128
+#  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
   ATOMIC_IMPL(Store, a, v, mo);
 }
-#endif
+#  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
@@ -568,12 +580,12 @@ a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
   ATOMIC_IMPL(Exchange, a, v, mo);
 }
 
-#if __TSAN_HAS_INT128
+#  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
   ATOMIC_IMPL(Exchange, a, v, mo);
 }
-#endif
+#  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
@@ -595,12 +607,12 @@ a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
   ATOMIC_IMPL(FetchAdd, a, v, mo);
 }
 
-#if __TSAN_HAS_INT128
+#  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
   ATOMIC_IMPL(FetchAdd, a, v, mo);
 }
-#endif
+#  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
@@ -622,12 +634,12 @@ a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
   ATOMIC_IMPL(FetchSub, a, v, mo);
 }
 
-#if __TSAN_HAS_INT128
+#  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
   ATOMIC_IMPL(FetchSub, a, v, mo);
 }
-#endif
+#  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
@@ -649,12 +661,12 @@ a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
   ATOMIC_IMPL(FetchAnd, a, v, mo);
 }
 
-#if __TSAN_HAS_INT128
+#  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
   ATOMIC_IMPL(FetchAnd, a, v, mo);
 }
-#endif
+#  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
@@ -676,12 +688,12 @@ a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
   ATOMIC_IMPL(FetchOr, a, v, mo);
 }
 
-#if __TSAN_HAS_INT128
+#  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
   ATOMIC_IMPL(FetchOr, a, v, mo);
 }
-#endif
+#  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
@@ -703,12 +715,12 @@ a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
   ATOMIC_IMPL(FetchXor, a, v, mo);
 }
 
-#if __TSAN_HAS_INT128
+#  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
   ATOMIC_IMPL(FetchXor, a, v, mo);
 }
-#endif
+#  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
@@ -730,115 +742,114 @@ a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
   ATOMIC_IMPL(FetchNand, a, v, mo);
 }
 
-#if __TSAN_HAS_INT128
+#  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
   ATOMIC_IMPL(FetchNand, a, v, mo);
 }
-#endif
+#  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
-    morder mo, morder fmo) {
+                                           morder mo, morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
-    morder mo, morder fmo) {
+                                            morder mo, morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
-    morder mo, morder fmo) {
+                                            morder mo, morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
-    morder mo, morder fmo) {
+                                            morder mo, morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
-#if __TSAN_HAS_INT128
+#  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
-    morder mo, morder fmo) {
+                                             morder mo, morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
-#endif
+#  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
-    morder mo, morder fmo) {
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo,
+                                         morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
-    morder mo, morder fmo) {
+                                          morder mo, morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
-    morder mo, morder fmo) {
+                                          morder mo, morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
-    morder mo, morder fmo) {
+                                          morder mo, morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
-#if __TSAN_HAS_INT128
+#  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
-    morder mo, morder fmo) {
+                                           morder mo, morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
-#endif
+#  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
-    morder mo, morder fmo) {
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo,
+                                       morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
-    morder mo, morder fmo) {
+                                         morder mo, morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
-    morder mo, morder fmo) {
+                                         morder mo, morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
-    morder mo, morder fmo) {
+                                         morder mo, morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
-#if __TSAN_HAS_INT128
+#  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
-    morder mo, morder fmo) {
+                                           morder mo, morder fmo) {
   ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
-#endif
+#  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic_signal_fence(morder mo) {
-}
+void __tsan_atomic_signal_fence(morder mo) {}
 }  // extern "C"
 
 #else  // #if !SANITIZER_GO
@@ -866,32 +877,34 @@ void __tsan_atomic_signal_fence(morder mo) {
 extern "C" {
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
+  ATOMIC_RET(Load, *(a32 *)(a + 8), *(a32 **)a, mo_acquire);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
+  ATOMIC_RET(Load, *(a64 *)(a + 8), *(a64 **)a, mo_acquire);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
+  ATOMIC(Store, *(a32 **)a, *(a32 *)(a + 8), mo_release);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
+  ATOMIC(Store, *(a64 **)a, *(a64 *)(a + 8), mo_release);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+  ATOMIC_RET(FetchAdd, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8),
+             mo_acq_rel);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+  ATOMIC_RET(FetchAdd, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8),
+             mo_acq_rel);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
@@ -920,30 +933,34 @@ void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+  ATOMIC_RET(Exchange, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8),
+             mo_acq_rel);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+  ATOMIC_RET(Exchange, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8),
+             mo_acq_rel);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_go_atomic32_compare_exchange(
-    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+                                         u8 *a) {
   a32 cur = 0;
-  a32 cmp = *(a32*)(a+8);
-  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
-  *(bool*)(a+16) = (cur == cmp);
+  a32 cmp = *(a32 *)(a + 8);
+  ATOMIC_RET(CAS, cur, *(a32 **)a, cmp, *(a32 *)(a + 12), mo_acq_rel,
+             mo_acquire);
+  *(bool *)(a + 16) = (cur == cmp);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_go_atomic64_compare_exchange(
-    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+                                         u8 *a) {
   a64 cur = 0;
-  a64 cmp = *(a64*)(a+8);
-  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
-  *(bool*)(a+24) = (cur == cmp);
+  a64 cmp = *(a64 *)(a + 8);
+  ATOMIC_RET(CAS, cur, *(a64 **)a, cmp, *(a64 *)(a + 16), mo_acq_rel,
+             mo_acquire);
+  *(bool *)(a + 24) = (cur == cmp);
 }
 }  // extern "C"
 #endif  // #if !SANITIZER_GO
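
A note on the Go thunks at the end: the u8 *a parameter is a packed argument
buffer from the Go runtime, and the casts encode its layout by raw offset.
Restating the 32-bit compare-exchange with named loads and stores (offsets
copied from the code above; the helper itself is a hypothetical illustration,
not part of the patch):

#include <cstdint>
#include <cstring>

typedef unsigned int a32;

// Buffer layout implied by __tsan_go_atomic32_compare_exchange:
//   a + 0  : a32*  addr  -- target atomic (pointer-sized slot)
//   a + 8  : a32   cmp   -- expected value
//   a + 12 : a32   xchg  -- replacement value
//   a + 16 : bool  ok    -- success flag written back to the Go caller
static void DecodeGoCAS32(uint8_t *a) {
  a32 *addr;
  std::memcpy(&addr, a + 0, sizeof(addr));
  a32 cmp, xchg;
  std::memcpy(&cmp, a + 8, sizeof(cmp));
  std::memcpy(&xchg, a + 12, sizeof(xchg));
  bool ok = __sync_bool_compare_and_swap(addr, cmp, xchg);
  std::memcpy(a + 16, &ok, sizeof(ok));
}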


