[compiler-rt] e0ddd42 - [msan] Use namespace qualifier. NFC

Fangrui Song via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 9 20:45:45 PDT 2024


Author: Fangrui Song
Date: 2024-08-09T20:45:39-07:00
New Revision: e0ddd42735b05fd6bee7fc24caeba5464e1a871a

URL: https://github.com/llvm/llvm-project/commit/e0ddd42735b05fd6bee7fc24caeba5464e1a871a
DIFF: https://github.com/llvm/llvm-project/commit/e0ddd42735b05fd6bee7fc24caeba5464e1a871a.diff

LOG: [msan] Use namespace qualifier. NFC

nsan will port msan_allocator.cpp and msan_thread.cpp. Clean up the two
files first.

Added: 
    

Modified: 
    compiler-rt/lib/msan/msan_allocator.cpp
    compiler-rt/lib/msan/msan_thread.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
index 8350106dc8175f..e1204b8d808cb4 100644
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -24,8 +24,9 @@
 #include "sanitizer_common/sanitizer_allocator_report.h"
 #include "sanitizer_common/sanitizer_errno.h"
 
-namespace __msan {
+using namespace __msan;
 
+namespace {
 struct Metadata {
   uptr requested_size;
 };
@@ -47,7 +48,9 @@ struct MsanMapUnmapCallback {
     }
   }
 };
+}  // namespace
 
+namespace __msan {
 // Note: to ensure that the allocator is compatible with the application memory
 // layout (especially with high-entropy ASLR), kSpaceBeg and kSpaceSize must be
 // duplicated as MappingDesc::ALLOCATOR in msan.h.
@@ -145,6 +148,7 @@ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
 #endif
 typedef CombinedAllocator<PrimaryAllocator> Allocator;
 typedef Allocator::AllocatorCache AllocatorCache;
+}  // namespace __msan
 
 static Allocator allocator;
 static AllocatorCache fallback_allocator_cache;
@@ -152,7 +156,7 @@ static StaticSpinMutex fallback_mutex;
 
 static uptr max_malloc_size;
 
-void MsanAllocatorInit() {
+void __msan::MsanAllocatorInit() {
   SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
   allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
   if (common_flags()->max_allocation_size_mb)
@@ -162,9 +166,9 @@ void MsanAllocatorInit() {
     max_malloc_size = kMaxAllowedMallocSize;
 }
 
-void LockAllocator() { allocator.ForceLock(); }
+void __msan::LockAllocator() { allocator.ForceLock(); }
 
-void UnlockAllocator() { allocator.ForceUnlock(); }
+void __msan::UnlockAllocator() { allocator.ForceUnlock(); }
 
 AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
   CHECK(ms);
@@ -235,7 +239,7 @@ static void *MsanAllocate(BufferedStackTrace *stack, uptr size, uptr alignment,
   return allocated;
 }
 
-void MsanDeallocate(BufferedStackTrace *stack, void *p) {
+void __msan::MsanDeallocate(BufferedStackTrace *stack, void *p) {
   CHECK(p);
   UnpoisonParam(1);
   RunFreeHooks(p);
@@ -327,15 +331,15 @@ static uptr AllocationSizeFast(const void *p) {
   return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
 }
 
-void *msan_malloc(uptr size, BufferedStackTrace *stack) {
+void *__msan::msan_malloc(uptr size, BufferedStackTrace *stack) {
   return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
 }
 
-void *msan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
+void *__msan::msan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
   return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
 }
 
-void *msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack) {
+void *__msan::msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack) {
   if (!ptr)
     return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
   if (size == 0) {
@@ -345,8 +349,8 @@ void *msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack) {
   return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
 }
 
-void *msan_reallocarray(void *ptr, uptr nmemb, uptr size,
-                        BufferedStackTrace *stack) {
+void *__msan::msan_reallocarray(void *ptr, uptr nmemb, uptr size,
+                                BufferedStackTrace *stack) {
   if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
     errno = errno_ENOMEM;
     if (AllocatorMayReturnNull())
@@ -357,11 +361,11 @@ void *msan_reallocarray(void *ptr, uptr nmemb, uptr size,
   return msan_realloc(ptr, nmemb * size, stack);
 }
 
-void *msan_valloc(uptr size, BufferedStackTrace *stack) {
+void *__msan::msan_valloc(uptr size, BufferedStackTrace *stack) {
   return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
 }
 
-void *msan_pvalloc(uptr size, BufferedStackTrace *stack) {
+void *__msan::msan_pvalloc(uptr size, BufferedStackTrace *stack) {
   uptr PageSize = GetPageSizeCached();
   if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
     errno = errno_ENOMEM;
@@ -375,7 +379,8 @@ void *msan_pvalloc(uptr size, BufferedStackTrace *stack) {
   return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
 }
 
-void *msan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
+void *__msan::msan_aligned_alloc(uptr alignment, uptr size,
+                                 BufferedStackTrace *stack) {
   if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
     errno = errno_EINVAL;
     if (AllocatorMayReturnNull())
@@ -386,7 +391,8 @@ void *msan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
   return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
 }
 
-void *msan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack) {
+void *__msan::msan_memalign(uptr alignment, uptr size,
+                            BufferedStackTrace *stack) {
   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
     errno = errno_EINVAL;
     if (AllocatorMayReturnNull())
@@ -397,8 +403,8 @@ void *msan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack) {
   return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
 }
 
-int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
-                        BufferedStackTrace *stack) {
+int __msan::msan_posix_memalign(void **memptr, uptr alignment, uptr size,
+                                BufferedStackTrace *stack) {
   if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
     if (AllocatorMayReturnNull())
       return errno_EINVAL;
@@ -414,10 +420,7 @@ int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
   return 0;
 }
 
-} // namespace __msan
-
-using namespace __msan;
-
+extern "C" {
 uptr __sanitizer_get_current_allocated_bytes() {
   uptr stats[AllocatorStatCount];
   allocator.GetStats(stats);
@@ -452,3 +455,4 @@ uptr __sanitizer_get_allocated_size_fast(const void *p) {
 }
 
 void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
+}

diff --git a/compiler-rt/lib/msan/msan_thread.cpp b/compiler-rt/lib/msan/msan_thread.cpp
index 280a7be2d8f43a..e5bdedcd415119 100644
--- a/compiler-rt/lib/msan/msan_thread.cpp
+++ b/compiler-rt/lib/msan/msan_thread.cpp
@@ -5,7 +5,7 @@
 #include "msan_interface_internal.h"
 #include "sanitizer_common/sanitizer_tls_get_addr.h"
 
-namespace __msan {
+using namespace __msan;
 
 MsanThread *MsanThread::Create(thread_callback_t start_routine,
                                void *arg) {
@@ -74,9 +74,7 @@ thread_return_t MsanThread::ThreadStart() {
     return 0;
   }
 
-  thread_return_t res = start_routine_(arg_);
-
-  return res;
+  return start_routine_(arg_);
 }
 
 MsanThread::StackBounds MsanThread::GetStackBounds() const {
@@ -119,5 +117,3 @@ void MsanThread::FinishSwitchFiber(uptr *bottom_old, uptr *size_old) {
   next_stack_.top = 0;
   next_stack_.bottom = 0;
 }
-
-} // namespace __msan


        


More information about the llvm-commits mailing list