[compiler-rt] [compiler-rt][fuchsia] Preallocate a vmar for sanitizer internals (PR #75256)

via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 11 12:06:03 PST 2024


https://github.com/PiJoules updated https://github.com/llvm/llvm-project/pull/75256

From b7acfdc358e304cca37cce1085b6c009faa453b1 Mon Sep 17 00:00:00 2001
From: Leonard Chan <leonardchan at google.com>
Date: Tue, 12 Dec 2023 15:58:28 -0800
Subject: [PATCH] [compiler-rt][fuchsia] Preallocate a vmar for sanitizer
 internals

In an effort to further reduce mmap fragmentation, preallocate a vmar
large enough to map sanitizer internals into via DoAnonymousMmap. Objects
mapped here include asan's FakeStack, LowLevelAllocator mappings, the
primary allocator's TwoLevelMap, InternalMmapVector, StackStore, and
asan's thread internals. The vmar is sized to hold the total of these
objects as seen over a "typical" process lifetime. If the vmar is full,
mapping falls back to the root vmar.
---
 .../sanitizer_common/sanitizer_fuchsia.cpp    | 77 +++++++++++++++----
 1 file changed, 63 insertions(+), 14 deletions(-)
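
For readers less familiar with the Zircon VMAR API, here is a condensed,
standalone sketch of the pattern this patch introduces: lazily carve a
child VMAR out of the root VMAR once, map sanitizer-internal VMOs into
it, and fall back to the root VMAR when the child runs out of space. The
sketch uses the public zx_* syscalls (the patch calls the sanitizer's
_zx_* aliases); the helper and variable names (GetHeapVmar,
MapIntoHeapVmar, g_heap_vmar, kHeapVmarSize) are placeholders for
illustration only, and the authoritative code is the diff that follows.

  #include <zircon/process.h>
  #include <zircon/syscalls.h>

  namespace {

  // Reserve roughly 13 MiB of contiguous address space, as in the patch.
  constexpr size_t kHeapVmarSize = 13ULL << 20;
  zx_handle_t g_heap_vmar = ZX_HANDLE_INVALID;

  // Lazily carve a child VMAR out of the root VMAR. Mappings that land
  // here stay clustered instead of fragmenting the root address space.
  zx_status_t GetHeapVmar(zx_handle_t *out) {
    zx_status_t status = ZX_OK;
    if (g_heap_vmar == ZX_HANDLE_INVALID) {
      zx_vaddr_t base;
      status = zx_vmar_allocate(
          zx_vmar_root_self(),
          ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC,
          /*offset=*/0, kHeapVmarSize, &g_heap_vmar, &base);
    }
    *out = g_heap_vmar;
    return status;
  }

  // Map a VMO into the child VMAR; if the child has no room left
  // (ZX_ERR_NO_RESOURCES), fall back to the root VMAR as before the patch.
  zx_status_t MapIntoHeapVmar(zx_handle_t vmo, size_t size, zx_vaddr_t *addr) {
    zx_handle_t vmar;
    zx_status_t status = GetHeapVmar(&vmar);
    if (status != ZX_OK)
      return status;
    status = zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                         /*vmar_offset=*/0, vmo, /*vmo_offset=*/0, size, addr);
    if (status == ZX_ERR_NO_RESOURCES)
      status = zx_vmar_map(zx_vmar_root_self(),
                           ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                           /*vmar_offset=*/0, vmo, /*vmo_offset=*/0, size,
                           addr);
    return status;
  }

  }  // namespace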

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
index 0245164403c51d..f8a196cd63a5a1 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
@@ -129,6 +129,55 @@ uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
 
 bool ErrorIsOOM(error_t err) { return err == ZX_ERR_NO_MEMORY; }
 
+// For any sanitizer internal that needs to map something which can be unmapped
+// later, first attempt to map to a pre-allocated VMAR. This helps reduce
+// fragmentation from many small anonymous mmap calls. A good value for this
+// VMAR size would be the total size of your typical sanitizer internal objects
+// allocated in an "average" process lifetime. Examples of this include:
+// FakeStack, LowLevelAllocator mappings, TwoLevelMap, InternalMmapVector,
+// StackStore, CreateAsanThread, etc.
+//
+// This is roughly equal to the total sum of sanitizer internal mappings for a
+// large test case.
+constexpr size_t kSanitizerHeapVmarSize = 13ULL << 20;
+static zx_handle_t gSanitizerHeapVmar = ZX_HANDLE_INVALID;
+
+static zx_status_t GetSanitizerHeapVmar(zx_handle_t *vmar) {
+  zx_status_t status = ZX_OK;
+  if (gSanitizerHeapVmar == ZX_HANDLE_INVALID) {
+    CHECK_EQ(kSanitizerHeapVmarSize % GetPageSizeCached(), 0);
+    uintptr_t base;
+    status = _zx_vmar_allocate(
+        _zx_vmar_root_self(),
+        ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
+        kSanitizerHeapVmarSize, &gSanitizerHeapVmar, &base);
+  }
+  *vmar = gSanitizerHeapVmar;
+  if (status == ZX_OK)
+    CHECK_NE(gSanitizerHeapVmar, ZX_HANDLE_INVALID);
+  return status;
+}
+
+static zx_status_t TryVmoMapSanitizerVmar(zx_vm_option_t options,
+                                          size_t vmar_offset, zx_handle_t vmo,
+                                          size_t size, uintptr_t *addr) {
+  zx_handle_t vmar;
+  zx_status_t status = GetSanitizerHeapVmar(&vmar);
+  if (status != ZX_OK)
+    return status;
+
+  status = _zx_vmar_map(gSanitizerHeapVmar, options, vmar_offset, vmo,
+                        /*vmo_offset=*/0, size, addr);
+  if (status == ZX_ERR_NO_RESOURCES) {
+    // This means there's no space in the heap VMAR, so fall back to the root
+    // VMAR.
+    status = _zx_vmar_map(_zx_vmar_root_self(), options, vmar_offset, vmo,
+                          /*vmo_offset=*/0, size, addr);
+  }
+
+  return status;
+}
+
 static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                   bool raw_report, bool die_for_nomem) {
   size = RoundUpTo(size, GetPageSize());
@@ -144,11 +193,9 @@ static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
   _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                           internal_strlen(mem_type));
 
-  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
   uintptr_t addr;
-  status =
-      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
-                   vmo, 0, size, &addr);
+  status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
+                                  /*vmar_offset=*/0, vmo, size, &addr);
   _zx_handle_close(vmo);
 
   if (status != ZX_OK) {
@@ -243,6 +290,12 @@ void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
 
   zx_status_t status =
       _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
+  if (status == ZX_ERR_INVALID_ARGS && target_vmar == gSanitizerHeapVmar) {
+    // If there wasn't any space in the heap vmar, the fallback was the root
+    // vmar.
+    status = _zx_vmar_unmap(_zx_vmar_root_self(),
+                            reinterpret_cast<uintptr_t>(addr), size);
+  }
   if (status != ZX_OK) {
     Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
            SanitizerToolName, size, size, addr);
@@ -308,17 +361,14 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
   _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                           internal_strlen(mem_type));
 
-  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
-
   // Map a larger size to get a chunk of address space big enough that
   // it surely contains an aligned region of the requested size.  Then
   // overwrite the aligned middle portion with a mapping from the
   // beginning of the VMO, and unmap the excess before and after.
   size_t map_size = size + alignment;
   uintptr_t addr;
-  status =
-      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
-                   vmo, 0, map_size, &addr);
+  status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
+                                  /*vmar_offset=*/0, vmo, map_size, &addr);
   if (status == ZX_OK) {
     uintptr_t map_addr = addr;
     uintptr_t map_end = map_addr + map_size;
@@ -326,14 +376,13 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
     uintptr_t end = addr + size;
     if (addr != map_addr) {
       zx_info_vmar_t info;
-      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
+      status = _zx_object_get_info(gSanitizerHeapVmar, ZX_INFO_VMAR, &info,
                                    sizeof(info), NULL, NULL);
       if (status == ZX_OK) {
         uintptr_t new_addr;
-        status = _zx_vmar_map(
-            _zx_vmar_root_self(),
+        status = TryVmoMapSanitizerVmar(
             ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
-            addr - info.base, vmo, 0, size, &new_addr);
+            addr - info.base, vmo, size, &new_addr);
         if (status == ZX_OK)
           CHECK_EQ(new_addr, addr);
       }
@@ -357,7 +406,7 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
 }
 
 void UnmapOrDie(void *addr, uptr size) {
-  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
+  UnmapOrDieVmar(addr, size, gSanitizerHeapVmar);
 }
 
 void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
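
Because a mapping requested through the heap VMAR may actually have
landed in the root VMAR (the ZX_ERR_NO_RESOURCES fallback above), the
unmap path has to accept both owners, which is what the UnmapOrDieVmar
hunk does. A minimal sketch of that symmetry, reusing the hypothetical
g_heap_vmar from the sketch near the top of this mail:

  // Try the child VMAR first; if it does not own the address
  // (ZX_ERR_INVALID_ARGS), the mapping must have been created via the
  // root-VMAR fallback, so retry against the root VMAR.
  zx_status_t UnmapFromHeapVmar(zx_vaddr_t addr, size_t size) {
    zx_status_t status = zx_vmar_unmap(g_heap_vmar, addr, size);
    if (status == ZX_ERR_INVALID_ARGS)
      status = zx_vmar_unmap(zx_vmar_root_self(), addr, size);
    return status;
  }

This is also why UnmapOrDie now passes gSanitizerHeapVmar rather than the
root VMAR: the fallback retry only triggers when the first attempt
targets the child VMAR.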


