[compiler-rt] 612e02e - [GWP-ASan] Refactor memory mapping functions

Kostya Kortchinsky via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 26 13:32:27 PDT 2020


Author: Kostya Kortchinsky
Date: 2020-10-26T13:32:08-07:00
New Revision: 612e02ee8c3e8f204378796af2eb526cf5e348f0

URL: https://github.com/llvm/llvm-project/commit/612e02ee8c3e8f204378796af2eb526cf5e348f0
DIFF: https://github.com/llvm/llvm-project/commit/612e02ee8c3e8f204378796af2eb526cf5e348f0.diff

LOG: [GWP-ASan] Refactor memory mapping functions

In preparation for Fuchsia support, this CL refactors the memory
mapping functions.

The new functions are as follows:
- for `FreeSlots` and `Metadata`:
  `void *map(size_t Size, const char *Name) const;`
  `void unmap(void *Ptr, size_t Size) const;`
- for the Pool:
  `void *reserveGuardedPool(size_t Size);`
  `void allocateInGuardedPool(void *Ptr, size_t Size) const;`
  `void deallocateInGuardedPool(void *Ptr, size_t Size) const;`
  `void unreserveGuardedPool();`
  Note that these don't need a `Name` parameter, as the names are fixed
  per function. `{reserve,unreserve}GuardedPool` are not `const` because
  they will modify platform-specific class members on Fuchsia (see the
  lifecycle sketch below).
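
A lifecycle sketch of the new interface (an illustrative call sequence,
not code from this patch; `PoolBytes`, `SlotPtr`, and `SlotBytes` are
placeholder names):

  void *Pool = reserveGuardedPool(PoolBytes);   // whole range PROT_NONE
  allocateInGuardedPool(SlotPtr, SlotBytes);    // a subrange becomes RW
  deallocateInGuardedPool(SlotPtr, SlotBytes);  // same pair, inaccessible again
  unreserveGuardedPool();                       // release the reservation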

I added a plethora of `assert()`s, as the initial code did not enforce
page alignment for sizes and addresses, which caused problems in the
initial Fuchsia draft. All sizes are now properly rounded up to a page
boundary.
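
For example, with the `roundUpTo()` helper added in the patch below
(valid only for power-of-two boundaries, as asserted in `init()`):

  // roundUpTo(Size, Boundary) == (Size + Boundary - 1) & ~(Boundary - 1)
  // With PageSize == 4096:
  //   roundUpTo(5000, 4096) == 8192  // rounds up to the next page
  //   roundUpTo(4096, 4096) == 4096  // already aligned, unchanged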

Differential Revision: https://reviews.llvm.org/D89993

Added: 
    

Modified: 
    compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
    compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
    compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
index 17e6f76727f9..6c4f2b87007a 100644
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
@@ -45,6 +45,10 @@ GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
   return SingletonPtr;
 }
 
+static size_t roundUpTo(size_t Size, size_t Boundary) {
+  return (Size + Boundary - 1) & ~(Boundary - 1);
+}
+
 void GuardedPoolAllocator::init(const options::Options &Opts) {
   // Note: We return from the constructor here if GWP-ASan is not available.
   // This will stop heap-allocation of class members, as well as mmap() of the
@@ -63,25 +67,29 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
 
   State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;
 
-  State.PageSize = getPlatformPageSize();
+  const size_t PageSize = getPlatformPageSize();
+  // getPageAddr() and roundUpTo() assume the page size to be a power of 2.
+  assert((PageSize & (PageSize - 1)) == 0);
+  State.PageSize = PageSize;
 
   PerfectlyRightAlign = Opts.PerfectlyRightAlign;
 
   size_t PoolBytesRequired =
-      State.PageSize * (1 + State.MaxSimultaneousAllocations) +
+      PageSize * (1 + State.MaxSimultaneousAllocations) +
       State.MaxSimultaneousAllocations * State.maximumAllocationSize();
-  void *GuardedPoolMemory = mapMemory(PoolBytesRequired, kGwpAsanGuardPageName);
+  assert(PoolBytesRequired % PageSize == 0);
+  void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);
 
-  size_t BytesRequired = State.MaxSimultaneousAllocations * sizeof(*Metadata);
+  size_t BytesRequired =
+      roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata), PageSize);
   Metadata = reinterpret_cast<AllocationMetadata *>(
-      mapMemory(BytesRequired, kGwpAsanMetadataName));
-  markReadWrite(Metadata, BytesRequired, kGwpAsanMetadataName);
+      map(BytesRequired, kGwpAsanMetadataName));
 
   // Allocate memory and set up the free pages queue.
-  BytesRequired = State.MaxSimultaneousAllocations * sizeof(*FreeSlots);
-  FreeSlots = reinterpret_cast<size_t *>(
-      mapMemory(BytesRequired, kGwpAsanFreeSlotsName));
-  markReadWrite(FreeSlots, BytesRequired, kGwpAsanFreeSlotsName);
+  BytesRequired = roundUpTo(
+      State.MaxSimultaneousAllocations * sizeof(*FreeSlots), PageSize);
+  FreeSlots =
+      reinterpret_cast<size_t *>(map(BytesRequired, kGwpAsanFreeSlotsName));
 
   // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
   // SampleRate) chance of sampling.
@@ -120,21 +128,20 @@ void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
 
 void GuardedPoolAllocator::uninitTestOnly() {
   if (State.GuardedPagePool) {
-    unmapMemory(reinterpret_cast<void *>(State.GuardedPagePool),
-                State.GuardedPagePoolEnd - State.GuardedPagePool,
-                kGwpAsanGuardPageName);
+    unreserveGuardedPool();
     State.GuardedPagePool = 0;
     State.GuardedPagePoolEnd = 0;
   }
   if (Metadata) {
-    unmapMemory(Metadata, State.MaxSimultaneousAllocations * sizeof(*Metadata),
-                kGwpAsanMetadataName);
+    unmap(Metadata,
+          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata),
+                    State.PageSize));
     Metadata = nullptr;
   }
   if (FreeSlots) {
-    unmapMemory(FreeSlots,
-                State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
-                kGwpAsanFreeSlotsName);
+    unmap(FreeSlots,
+          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
+                    State.PageSize));
     FreeSlots = nullptr;
   }
 }
@@ -184,8 +191,9 @@ void *GuardedPoolAllocator::allocate(size_t Size) {
   // If a slot is multiple pages in size, and the allocation takes up a single
   // page, we can improve overflow detection by leaving the unused pages as
   // unmapped.
-  markReadWrite(reinterpret_cast<void *>(getPageAddr(Ptr, State.PageSize)),
-                Size, kGwpAsanAliveSlotName);
+  const size_t PageSize = State.PageSize;
+  allocateInGuardedPool(reinterpret_cast<void *>(getPageAddr(Ptr, PageSize)),
+                        roundUpTo(Size, PageSize));
 
   Meta->RecordAllocation(Ptr, Size);
   Meta->AllocationTrace.RecordBacktrace(Backtrace);
@@ -241,8 +249,8 @@ void GuardedPoolAllocator::deallocate(void *Ptr) {
     }
   }
 
-  markInaccessible(reinterpret_cast<void *>(SlotStart),
-                   State.maximumAllocationSize(), kGwpAsanGuardPageName);
+  deallocateInGuardedPool(reinterpret_cast<void *>(SlotStart),
+                          State.maximumAllocationSize());
 
   // And finally, lock again to release the slot back into the pool.
   ScopedLock L(PoolMutex);
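
For concreteness, the pool-size arithmetic in init() above works out as
follows (the PageSize, MaxSimultaneousAllocations, and
maximumAllocationSize() values are assumed for illustration):

  // PoolBytesRequired = PageSize * (1 + MaxSimultaneousAllocations)
  //                   + MaxSimultaneousAllocations * maximumAllocationSize()
  // With PageSize == 4096, MaxSimultaneousAllocations == 16, and
  // maximumAllocationSize() == 4096 (one-page slots):
  //   4096 * 17 + 16 * 4096 == 69632 + 65536 == 135168 (33 pages)
  // That is 17 guard pages interleaved with 16 slot pages; a multiple of
  // the page size, so the new assert(PoolBytesRequired % PageSize == 0)
  // holds.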

diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
index 801f33e3f9ed..294a5b4143f5 100644
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
@@ -124,15 +124,30 @@ class GuardedPoolAllocator {
   // memory into this process in a platform-specific way. Pointer and size
   // arguments are expected to be page-aligned. These functions will never
   // return on error, instead electing to kill the calling process on failure.
-  // Note that memory is initially mapped inaccessible. In order for RW
-  // mappings, call mapMemory() followed by markReadWrite() on the returned
-  // pointer. Each mapping is named on platforms that support it, primarily
-  // Android. This name must be a statically allocated string, as the Android
-  // kernel uses the string pointer directly.
-  void *mapMemory(size_t Size, const char *Name) const;
-  void unmapMemory(void *Ptr, size_t Size, const char *Name) const;
-  void markReadWrite(void *Ptr, size_t Size, const char *Name) const;
-  void markInaccessible(void *Ptr, size_t Size, const char *Name) const;
+  // The pool memory is initially reserved and inaccessible, and RW mappings are
+  // subsequently created and destroyed via allocateInGuardedPool() and
+  // deallocateInGuardedPool(). Each mapping is named on platforms that support
+  // it, primarily Android. This name must be a statically allocated string, as
+  // the Android kernel uses the string pointer directly.
+  void *map(size_t Size, const char *Name) const;
+  void unmap(void *Ptr, size_t Size) const;
+
+  // The pool is managed separately, as some platforms (particularly Fuchsia)
+  // manage virtual memory regions as a chunk where individual pages can still
+  // have separate permissions. These platforms maintain metadata about the
+  // region in order to perform operations. The pool is unique as it's the only
+  // thing in GWP-ASan that treats pages in a single VM region on an individual
+  // basis for page protection.
+  // The pointer returned by reserveGuardedPool() is the reserved address range
+  // of (at least) Size bytes.
+  void *reserveGuardedPool(size_t Size);
+  // allocateInGuardedPool() Ptr and Size must be a subrange of the previously
+  // reserved pool range.
+  void allocateInGuardedPool(void *Ptr, size_t Size) const;
+  // deallocateInGuardedPool() Ptr and Size must be an exact pair previously
+  // passed to allocateInGuardedPool().
+  void deallocateInGuardedPool(void *Ptr, size_t Size) const;
+  void unreserveGuardedPool();
 
   // Get the page size from the platform-specific implementation. Only needs to
   // be called once, and the result should be cached in PageSize in this class.

diff --git a/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp b/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
index c2cd24454fa2..21b622c56729 100644
--- a/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
+++ b/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
@@ -25,6 +25,7 @@
 #define PR_SET_VMA_ANON_NAME 0
 #endif // ANDROID
 
+namespace {
 void MaybeSetMappingName(void *Mapping, size_t Size, const char *Name) {
 #ifdef ANDROID
   prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, Mapping, Size, Name);
@@ -32,44 +33,64 @@ void MaybeSetMappingName(void *Mapping, size_t Size, const char *Name) {
   // Anonymous mapping names are only supported on Android.
   return;
 }
+} // anonymous namespace
 
 namespace gwp_asan {
 
 void GuardedPoolAllocator::initPRNG() {
-  ThreadLocals.RandomState = time(nullptr) + getThreadID();
+  ThreadLocals.RandomState =
+      static_cast<uint32_t>(time(nullptr) + getThreadID());
 }
 
-void *GuardedPoolAllocator::mapMemory(size_t Size, const char *Name) const {
-  void *Ptr =
-      mmap(nullptr, Size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+void *GuardedPoolAllocator::map(size_t Size, const char *Name) const {
+  assert((Size % State.PageSize) == 0);
+  void *Ptr = mmap(nullptr, Size, PROT_READ | PROT_WRITE,
+                   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
   Check(Ptr != MAP_FAILED, "Failed to map guarded pool allocator memory");
   MaybeSetMappingName(Ptr, Size, Name);
   return Ptr;
 }
 
-void GuardedPoolAllocator::unmapMemory(void *Ptr, size_t Size,
-                                       const char *Name) const {
+void GuardedPoolAllocator::unmap(void *Ptr, size_t Size) const {
+  assert((reinterpret_cast<uintptr_t>(Ptr) % State.PageSize) == 0);
+  assert((Size % State.PageSize) == 0);
   Check(munmap(Ptr, Size) == 0,
         "Failed to unmap guarded pool allocator memory.");
-  MaybeSetMappingName(Ptr, Size, Name);
 }
 
-void GuardedPoolAllocator::markReadWrite(void *Ptr, size_t Size,
-                                         const char *Name) const {
+void *GuardedPoolAllocator::reserveGuardedPool(size_t Size) {
+  assert((Size % State.PageSize) == 0);
+  void *Ptr =
+      mmap(nullptr, Size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+  Check(Ptr != MAP_FAILED, "Failed to reserve guarded pool allocator memory");
+  MaybeSetMappingName(Ptr, Size, kGwpAsanGuardPageName);
+  return Ptr;
+}
+
+void GuardedPoolAllocator::unreserveGuardedPool() {
+  unmap(reinterpret_cast<void *>(State.GuardedPagePool),
+        State.GuardedPagePoolEnd - State.GuardedPagePool);
+}
+
+void GuardedPoolAllocator::allocateInGuardedPool(void *Ptr, size_t Size) const {
+  assert((reinterpret_cast<uintptr_t>(Ptr) % State.PageSize) == 0);
+  assert((Size % State.PageSize) == 0);
   Check(mprotect(Ptr, Size, PROT_READ | PROT_WRITE) == 0,
-        "Failed to set guarded pool allocator memory at as RW.");
-  MaybeSetMappingName(Ptr, Size, Name);
+        "Failed to allocate in guarded pool allocator memory");
+  MaybeSetMappingName(Ptr, Size, kGwpAsanAliveSlotName);
 }
 
-void GuardedPoolAllocator::markInaccessible(void *Ptr, size_t Size,
-                                            const char *Name) const {
+void GuardedPoolAllocator::deallocateInGuardedPool(void *Ptr,
+                                                   size_t Size) const {
+  assert((reinterpret_cast<uintptr_t>(Ptr) % State.PageSize) == 0);
+  assert((Size % State.PageSize) == 0);
   // mmap() a PROT_NONE page over the address to release it to the system, if
   // we used mprotect() here the system would count pages in the quarantine
   // against the RSS.
   Check(mmap(Ptr, Size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1,
              0) != MAP_FAILED,
-        "Failed to set guarded pool allocator memory as inaccessible.");
-  MaybeSetMappingName(Ptr, Size, Name);
+        "Failed to deallocate in guarded pool allocator memory");
+  MaybeSetMappingName(Ptr, Size, kGwpAsanGuardPageName);
 }
 
 size_t GuardedPoolAllocator::getPlatformPageSize() {
@@ -87,5 +108,4 @@ void GuardedPoolAllocator::installAtFork() {
   };
   pthread_atfork(Disable, Enable, Enable);
 }
-
 } // namespace gwp_asan
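
The comment in deallocateInGuardedPool() above is worth a standalone
illustration. A minimal sketch of the technique, assuming plain POSIX
(not code from this patch):

  #include <assert.h>
  #include <stddef.h>
  #include <sys/mman.h>

  // Remapping with MAP_FIXED | MAP_ANONYMOUS replaces the quarantined
  // pages with fresh, untouched PROT_NONE pages; the kernel drops their
  // backing memory, so they stop counting against the process's RSS.
  void makeInaccessible(void *Ptr, size_t Size) {
    void *P = mmap(Ptr, Size, PROT_NONE,
                   MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    assert(P != MAP_FAILED);
    // mprotect(Ptr, Size, PROT_NONE) would flip the permissions but keep
    // the dirty pages resident and charged to the process until reclaimed.
  }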