[compiler-rt] r319083 - [fuchsia] Update Fuchsia with a new mmap implementation.

Kostya Kortchinsky via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 27 11:53:53 PST 2017


Author: cryptoad
Date: Mon Nov 27 11:53:53 2017
New Revision: 319083

URL: http://llvm.org/viewvc/llvm-project?rev=319083&view=rev
Log:
[fuchsia] Update Fuchsia with a new mmap implementation.

Summary:
    Now that the sanitizer_common interface for MmapNoAccess / MmapFixed
    has been refactored to allow a more OO-esque access pattern, update the
    Fuchsia mmap implementation to take advantage of this.
    
    Previously, MmapNoAccess / MmapFixed relied on a global allocator_vmar,
    since the sanitizer_allocator only called MmapNoAccess once.  Now, we
    create a new VMAR per ReservedAddressRange object.
    
    This allows the sanitizer allocator to work in tandem with the Scudo
    secondary allocator.
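    
    As a rough sketch of the usage pattern this enables (illustrative only:
    ReservedAddressRange, Init, MapOrDie, Unmap and GetPageSizeCached come
    from sanitizer_common, while the driver function, kSpaceSize and the
    region name below are made up), a client can now hold independent
    reservations, each backed by its own VMAR:
    
        #include "sanitizer_common/sanitizer_common.h"
        
        using namespace __sanitizer;
        
        void ReserveAndCommitExample() {
          // Hypothetical 1 GiB reservation; pick whatever the client needs.
          const uptr kSpaceSize = 1ULL << 30;
        
          // Static storage so members start zero-initialized, matching how
          // the allocators embed the range in globals.  On Fuchsia, Init()
          // now allocates a dedicated child VMAR and stores its handle in
          // os_handle_ instead of going through a process-wide
          // allocator_vmar.
          static ReservedAddressRange range;
          uptr base = range.Init(kSpaceSize, "example address space",
                                 /*fixed_addr=*/0);
        
          // Commit one page inside the reservation; on Fuchsia this maps a
          // VMO into the range's own VMAR at the matching offset.
          uptr page = range.MapOrDie(base, GetPageSizeCached());
        
          // Release it again; the unmap also goes through the per-range
          // VMAR.  Note that Unmap() CHECKs that the freed region sits at
          // the beginning or end of the reservation.
          range.Unmap(page, GetPageSizeCached());
        }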
    
    This is part 4 of a 4 part changeset:
    * part 1 https://reviews.llvm.org/D38593
    * part 2 https://reviews.llvm.org/D38592
    * part 3 https://reviews.llvm.org/D38593

Reviewers: mcgrathr, cryptoad

Reviewed By: cryptoad

Subscribers: alekseyshl, mcgrathr, kubamracek, mehdi_amini

Differential Revision: https://reviews.llvm.org/D38595

Modified:
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_fuchsia.cc
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix_libcdep.cc
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h?rev=319083&r1=319082&r2=319083&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h Mon Nov 27 11:53:53 2017
@@ -142,6 +142,7 @@ class ReservedAddressRange {
   void* base_;
   uptr size_;
   const char* name_;
+  uptr os_handle_;
 };
 
 typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_fuchsia.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_fuchsia.cc?rev=319083&r1=319082&r2=319083&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_fuchsia.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_fuchsia.cc Mon Nov 27 11:53:53 2017
@@ -191,9 +191,7 @@ uptr GetMaxUserVirtualAddress() {
   return ShadowBounds.memory_limit - 1;
 }
 
-uptr GetMaxVirtualAddress() {
-  return GetMaxUserVirtualAddress();
-}
+uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
 
 static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                   bool raw_report, bool die_for_nomem) {
@@ -240,108 +238,96 @@ void *MmapOrDieOnFatalError(uptr size, c
   return DoAnonymousMmapOrDie(size, mem_type, false, false);
 }
 
-uptr ReservedAddressRange::Init(uptr init_size, const char* name,
+uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                 uptr fixed_addr) {
-  base_ = MmapNoAccess(init_size);
-  size_ = init_size;
-  name_ = name;
-  return reinterpret_cast<uptr>(base_);
-}
-
-// Uses fixed_addr for now.
-// Will use offset instead once we've implemented this function for real.
-uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size) {
-  return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr,
-                                                           map_size));
-}
-
-uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size) {
-  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, map_size));
-}
-
-void ReservedAddressRange::Unmap(uptr addr, uptr size) {
-  void* addr_as_void = reinterpret_cast<void*>(addr);
-  uptr base_as_uptr = reinterpret_cast<uptr>(base_);
-  // Only unmap at the beginning or end of the range.
-  CHECK((addr_as_void == base_) || (addr + size == base_as_uptr + size_));
-  CHECK_LE(size, size_);
-  UnmapOrDie(reinterpret_cast<void*>(addr), size);
-  if (addr_as_void == base_) {
-    base_ = reinterpret_cast<void*>(addr + size);
-  }
-  size_ = size_ - size;
-}
-
-// MmapNoAccess and MmapFixedOrDie are used only by sanitizer_allocator.
-// Instead of doing exactly what they say, we make MmapNoAccess actually
-// just allocate a VMAR to reserve the address space.  Then MmapFixedOrDie
-// uses that VMAR instead of the root.
-
-zx_handle_t allocator_vmar = ZX_HANDLE_INVALID;
-uintptr_t allocator_vmar_base;
-size_t allocator_vmar_size;
-
-void *MmapNoAccess(uptr size) {
-  size = RoundUpTo(size, PAGE_SIZE);
-  CHECK_EQ(allocator_vmar, ZX_HANDLE_INVALID);
+  init_size = RoundUpTo(init_size, PAGE_SIZE);
+  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
   uintptr_t base;
+  zx_handle_t vmar;
   zx_status_t status =
-      _zx_vmar_allocate(_zx_vmar_root_self(), 0, size,
+      _zx_vmar_allocate(_zx_vmar_root_self(), 0, init_size,
                         ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
                             ZX_VM_FLAG_CAN_MAP_SPECIFIC,
-                        &allocator_vmar, &base);
+                        &vmar, &base);
   if (status != ZX_OK)
-    ReportMmapFailureAndDie(size, "sanitizer allocator address space",
-                            "zx_vmar_allocate", status);
+    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
+  base_ = reinterpret_cast<void *>(base);
+  size_ = init_size;
+  name_ = name;
+  os_handle_ = vmar;
 
-  allocator_vmar_base = base;
-  allocator_vmar_size = size;
-  return reinterpret_cast<void *>(base);
+  return reinterpret_cast<uptr>(base_);
 }
 
-constexpr const char kAllocatorVmoName[] = "sanitizer_allocator";
-
-static void *DoMmapFixedOrDie(uptr fixed_addr, uptr size, bool die_for_nomem) {
-  size = RoundUpTo(size, PAGE_SIZE);
-
+static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
+                             void *base, const char *name, bool die_for_nomem) {
+  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
+  map_size = RoundUpTo(map_size, PAGE_SIZE);
   zx_handle_t vmo;
-  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
+  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
   if (status != ZX_OK) {
     if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
-      ReportMmapFailureAndDie(size, kAllocatorVmoName, "zx_vmo_create", status);
-    return nullptr;
+      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
+    return 0;
   }
-  _zx_object_set_property(vmo, ZX_PROP_NAME, kAllocatorVmoName,
-                          sizeof(kAllocatorVmoName) - 1);
-
-  DCHECK_GE(fixed_addr, allocator_vmar_base);
-  uintptr_t offset = fixed_addr - allocator_vmar_base;
-  DCHECK_LE(size, allocator_vmar_size);
-  DCHECK_GE(allocator_vmar_size - offset, size);
-
+  _zx_object_set_property(vmo, ZX_PROP_NAME, name, sizeof(name) - 1);
+  DCHECK_GE(base + size_, map_size + offset);
   uintptr_t addr;
+
   status = _zx_vmar_map(
-      allocator_vmar, offset, vmo, 0, size,
+      vmar, offset, vmo, 0, map_size,
       ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | ZX_VM_FLAG_SPECIFIC,
       &addr);
   _zx_handle_close(vmo);
   if (status != ZX_OK) {
-    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
-      ReportMmapFailureAndDie(size, kAllocatorVmoName, "zx_vmar_map", status);
-    return nullptr;
+    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
+      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
+    }
+    return 0;
   }
+  IncreaseTotalMmap(map_size);
+  return addr;
+}
 
-  IncreaseTotalMmap(size);
+uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size) {
+  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
+                          name_, false);
+}
 
-  return reinterpret_cast<void *>(addr);
+uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size) {
+  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
+                          name_, true);
 }
 
-void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
-  return DoMmapFixedOrDie(fixed_addr, size, true);
+void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
+  if (!addr || !size) return;
+  size = RoundUpTo(size, PAGE_SIZE);
+
+  zx_status_t status =
+      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
+  if (status != ZX_OK) {
+    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
+           SanitizerToolName, size, size, addr);
+    CHECK("unable to unmap" && 0);
+  }
+
+  DecreaseTotalMmap(size);
 }
 
-void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
-  return DoMmapFixedOrDie(fixed_addr, size, false);
+void ReservedAddressRange::Unmap(uptr fixed_addr, uptr size) {
+  uptr offset = fixed_addr - reinterpret_cast<uptr>(base_);
+  uptr addr = reinterpret_cast<uptr>(base_) + offset;
+  void *addr_as_void = reinterpret_cast<void *>(addr);
+  uptr base_as_uptr = reinterpret_cast<uptr>(base_);
+  // Only unmap at the beginning or end of the range.
+  CHECK((addr_as_void == base_) || (addr + size == base_as_uptr + size_));
+  CHECK_LE(size, size_);
+  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size,
+                 static_cast<zx_handle_t>(os_handle_));
+  if (addr_as_void == base_) {
+    base_ = reinterpret_cast<void *>(addr + size);
+  }
+  size_ = size_ - size;
 }
 
 // This should never be called.
@@ -413,18 +399,7 @@ void *MmapAlignedOrDieOnFatalError(uptr
 }
 
 void UnmapOrDie(void *addr, uptr size) {
-  if (!addr || !size) return;
-  size = RoundUpTo(size, PAGE_SIZE);
-
-  zx_status_t status = _zx_vmar_unmap(_zx_vmar_root_self(),
-                                      reinterpret_cast<uintptr_t>(addr), size);
-  if (status != ZX_OK) {
-    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
-           SanitizerToolName, size, size, addr);
-    CHECK("unable to unmap" && 0);
-  }
-
-  DecreaseTotalMmap(size);
+  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
 }
 
 // This is used on the shadow mapping, which cannot be changed.
@@ -432,7 +407,8 @@ void UnmapOrDie(void *addr, uptr size) {
 void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
 
 void DumpProcessMap() {
-  UNIMPLEMENTED();  // TODO(mcgrathr): write it
+  // TODO(mcgrathr): write it
+  return;
 }
 
 bool IsAccessibleMemoryRange(uptr beg, uptr size) {
@@ -532,6 +508,8 @@ u32 GetNumberOfCPUs() {
   return zx_system_get_num_cpus();
 }
 
+uptr GetRSS() { UNIMPLEMENTED(); }
+
 }  // namespace __sanitizer
 
 using namespace __sanitizer;  // NOLINT

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix_libcdep.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix_libcdep.cc?rev=319083&r1=319082&r2=319083&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix_libcdep.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix_libcdep.cc Mon Nov 27 11:53:53 2017
@@ -350,6 +350,7 @@ uptr ReservedAddressRange::Init(uptr siz
   }
   size_ = size;
   name_ = name;
+  (void)os_handle_;  // unsupported
   return reinterpret_cast<uptr>(base_);
 }
 

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc?rev=319083&r1=319082&r2=319083&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc Mon Nov 27 11:53:53 2017
@@ -286,6 +286,7 @@ uptr ReservedAddressRange::Init(uptr siz
   }
   size_ = size;
   name_ = name;
+  (void)os_handle_;  // unsupported
   return reinterpret_cast<uptr>(base_);
 }
 