[llvm-commits] [compiler-rt] r169155 - in /compiler-rt/trunk/lib/sanitizer_common: sanitizer_linux.cc sanitizer_procmaps.h

Alexander Potapenko glider at google.com
Mon Dec 3 13:21:22 PST 2012


Author: glider
Date: Mon Dec  3 15:21:22 2012
New Revision: 169155

URL: http://llvm.org/viewvc/llvm-project?rev=169155&view=rev
Log:
Use a struct to hold the /proc/self/maps buffer on Linux.
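
The refactoring bundles the three parallel members (buffer pointer,
mmap'ed size, and used length) into a single ProcSelfMapsBuff struct,
so snapshotting and restoring the cache collapses into whole-struct
assignments. A condensed sketch of the resulting pattern, paraphrased
from the diff below rather than quoted verbatim (uptr is
sanitizer_common's unsigned word-sized integer type):

    struct ProcSelfMapsBuff {
      char *data;        // buffer holding the /proc/self/maps contents
      uptr mmaped_size;  // size of the mmap'ed region backing the buffer
      uptr len;          // number of bytes actually read
    };

    // In CacheMemoryMappings(): one assignment snapshots the old cache...
    ProcSelfMapsBuff old_proc_self_maps = cached_proc_self_maps_;
    // ...and one assignment restores it if re-reading the file fails:
    if (cached_proc_self_maps_.mmaped_size == 0)
      cached_proc_self_maps_ = old_proc_self_maps;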

Modified:
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_linux.cc
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_procmaps.h

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_linux.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_linux.cc?rev=169155&r1=169154&r2=169155&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_linux.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_linux.cc Mon Dec  3 15:21:22 2012
@@ -218,63 +218,54 @@
 }
 
 // ----------------- sanitizer_procmaps.h
-char *MemoryMappingLayout::cached_proc_self_maps_buff_ = NULL;
-uptr MemoryMappingLayout::cached_proc_self_maps_buff_mmaped_size_ = 0;
-uptr MemoryMappingLayout::cached_proc_self_maps_buff_len_ = 0;
-StaticSpinMutex MemoryMappingLayout::cache_lock_;
+ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;  // Linker initialized.
+StaticSpinMutex MemoryMappingLayout::cache_lock_;  // Linker initialized.
 
 MemoryMappingLayout::MemoryMappingLayout() {
-  proc_self_maps_buff_len_ =
-      ReadFileToBuffer("/proc/self/maps", &proc_self_maps_buff_,
-                       &proc_self_maps_buff_mmaped_size_, 1 << 26);
-  if (proc_self_maps_buff_mmaped_size_ == 0) {
+  proc_self_maps_.len =
+      ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
+                       &proc_self_maps_.mmaped_size, 1 << 26);
+  if (proc_self_maps_.mmaped_size == 0) {
     LoadFromCache();
-    CHECK_GT(proc_self_maps_buff_len_, 0);
+    CHECK_GT(proc_self_maps_.len, 0);
   }
-  // internal_write(2, proc_self_maps_buff_, proc_self_maps_buff_len_);
+  // internal_write(2, proc_self_maps_.data, proc_self_maps_.len);
   Reset();
   // FIXME: in the future we may want to cache the mappings on demand only.
   CacheMemoryMappings();
 }
 
 MemoryMappingLayout::~MemoryMappingLayout() {
-  UnmapOrDie(proc_self_maps_buff_, proc_self_maps_buff_mmaped_size_);
+  UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
 }
 
 void MemoryMappingLayout::Reset() {
-  current_ = proc_self_maps_buff_;
+  current_ = proc_self_maps_.data;
 }
 
 // static
 void MemoryMappingLayout::CacheMemoryMappings() {
   SpinMutexLock l(&cache_lock_);
   // Don't invalidate the cache if the mappings are unavailable.
-  char *old_proc_self_maps_buff_ = cached_proc_self_maps_buff_;
-  uptr old_proc_self_maps_buff_mmaped_size_ =
-      cached_proc_self_maps_buff_mmaped_size_;
-  uptr old_proc_self_maps_buff_len_ = cached_proc_self_maps_buff_len_;
-  cached_proc_self_maps_buff_len_ =
-      ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_buff_,
-                       &cached_proc_self_maps_buff_mmaped_size_, 1 << 26);
-  if (cached_proc_self_maps_buff_mmaped_size_ == 0) {
-    cached_proc_self_maps_buff_ = old_proc_self_maps_buff_;
-    cached_proc_self_maps_buff_mmaped_size_ =
-        old_proc_self_maps_buff_mmaped_size_;
-    cached_proc_self_maps_buff_len_ = old_proc_self_maps_buff_len_;
+  ProcSelfMapsBuff old_proc_self_maps;
+  old_proc_self_maps = cached_proc_self_maps_;
+  cached_proc_self_maps_.len =
+      ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_.data,
+                       &cached_proc_self_maps_.mmaped_size, 1 << 26);
+  if (cached_proc_self_maps_.mmaped_size == 0) {
+    cached_proc_self_maps_ = old_proc_self_maps;
   } else {
-    if (old_proc_self_maps_buff_mmaped_size_) {
-      UnmapOrDie(old_proc_self_maps_buff_,
-                 old_proc_self_maps_buff_mmaped_size_);
+    if (old_proc_self_maps.mmaped_size) {
+      UnmapOrDie(old_proc_self_maps.data,
+                 old_proc_self_maps.mmaped_size);
     }
   }
 }
 
 void MemoryMappingLayout::LoadFromCache() {
   SpinMutexLock l(&cache_lock_);
-  if (cached_proc_self_maps_buff_) {
-    proc_self_maps_buff_ = cached_proc_self_maps_buff_;
-    proc_self_maps_buff_len_ = cached_proc_self_maps_buff_len_;
-    proc_self_maps_buff_mmaped_size_ = cached_proc_self_maps_buff_mmaped_size_;
+  if (cached_proc_self_maps_.data) {
+    proc_self_maps_ = cached_proc_self_maps_;
   }
 }
 
@@ -309,7 +300,7 @@
 
 bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
                                char filename[], uptr filename_size) {
-  char *last = proc_self_maps_buff_ + proc_self_maps_buff_len_;
+  char *last = proc_self_maps_.data + proc_self_maps_.len;
   if (current_ >= last) return false;
   uptr dummy;
   if (!start) start = &dummy;
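
For context, the Next() signature in the hunk above is the iteration
API that consumers of this buffer use. A minimal caller-side sketch,
not part of this patch; the filename buffer size is illustrative:

    MemoryMappingLayout proc_maps;  // reads /proc/self/maps on construction
    uptr start, end, offset;
    char filename[512];  // illustrative size, not taken from the tree
    while (proc_maps.Next(&start, &end, &offset, filename,
                          sizeof(filename))) {
      // Each call yields one mapping: [start, end), its file offset,
      // and the backing path (if any).
    }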

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_procmaps.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_procmaps.h?rev=169155&r1=169154&r2=169155&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_procmaps.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_procmaps.h Mon Dec  3 15:21:22 2012
@@ -30,6 +30,14 @@
 };
 
 #else  // _WIN32
+#if defined(__linux__)
+struct ProcSelfMapsBuff {
+  char *data;
+  uptr mmaped_size;
+  uptr len;
+};
+#endif  // defined(__linux__)
+
 class MemoryMappingLayout {
  public:
   MemoryMappingLayout();
@@ -79,16 +87,12 @@
   }
 
 # if defined __linux__
-  char *proc_self_maps_buff_;
-  uptr proc_self_maps_buff_mmaped_size_;
-  uptr proc_self_maps_buff_len_;
+  ProcSelfMapsBuff proc_self_maps_;
   char *current_;
 
   // Static mappings cache.
-  static char *cached_proc_self_maps_buff_;
-  static uptr cached_proc_self_maps_buff_mmaped_size_;
-  static uptr cached_proc_self_maps_buff_len_;
-  static StaticSpinMutex cache_lock_;  // protects the cache contents.
+  static ProcSelfMapsBuff cached_proc_self_maps_;
+  static StaticSpinMutex cache_lock_;  // protects cached_proc_self_maps_.
 # elif defined __APPLE__
   template<u32 kLCSegment, typename SegmentCommand>
   bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
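
A note on the "// Linker initialized." comments in the .cc file:
ProcSelfMapsBuff is a plain aggregate with no constructor, so the
static cached_proc_self_maps_ instance is effectively zero-initialized
at load time, before any static constructors run, and no static
initializer is needed. A sketch of the resulting invariant, not code
from the tree:

    // Before the first CacheMemoryMappings() call:
    //   cached_proc_self_maps_.data == NULL
    // so the null check in LoadFromCache() correctly makes it a no-op.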
