[llvm] Implement reserveAllocationSpace for SectionMemoryManager (PR #71968)

Michael Smith via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 10 10:00:48 PST 2023


https://github.com/MikaelSmith created https://github.com/llvm/llvm-project/pull/71968

Implements `reserveAllocationSpace` and provides an option to enable `needsToReserveAllocationSpace` for large-memory environments with AArch64.

The [AArch64 ABI](https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst#7code-models) has restrictions on the distance between TEXT and GOT sections as the instructions to reference them are limited to 2 or 4GB. Allocating sections in multiple blocks can result in distances greater than that on systems with lots of memory. In those environments several projects using SectionMemoryManager with MCJIT have run across assertion failures for the R_AARCH64_ADR_PREL_PG_HI21 relocation as it attempts to address across distances greater than 2GB (an int32).

Fixes #71963 by allocating all sections in a single contiguous memory allocation, limiting the distance required for instruction offsets similar to how pre-compiled binaries would be loaded into memory.

TODO: add tests to MCJITMemoryManagerTest.

>From 24ebe923881e1f252f4ad64a533d816390418a6d Mon Sep 17 00:00:00 2001
From: Michael Smith <michael.smith at cloudera.com>
Date: Fri, 10 Nov 2023 09:48:28 -0800
Subject: [PATCH] Implement reserveAllocationSpace for SectionMemoryManager

Implements `reserveAllocationSpace` and provides an option to enable
`needsToReserveAllocationSpace` for large-memory environments with
AArch64.

The [AArch64 ABI](https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst#7code-models)
has restrictions on the distance between TEXT and GOT sections as the
instructions to reference them are limited to 2 or 4GB. Allocating
sections in multiple blocks can result in distances greater than that
on systems with lots of memory. In those environments several projects
using SectionMemoryManager with MCJIT have run across assertion failures
for the R_AARCH64_ADR_PREL_PG_HI21 relocation as it attempts to address
across distances greater than 2GB (an int32).

Fixes #71963 by allocating all sections in a single contiguous memory
allocation, limiting the distance required for instruction offsets
similar to how pre-compiled binaries would be loaded into memory.

TODO: add tests to MCJITMemoryManagerTest.
---
 .../ExecutionEngine/SectionMemoryManager.h    | 14 +++-
 .../ExecutionEngine/SectionMemoryManager.cpp  | 66 ++++++++++++++++++-
 2 files changed, 77 insertions(+), 3 deletions(-)

diff --git a/llvm/include/llvm/ExecutionEngine/SectionMemoryManager.h b/llvm/include/llvm/ExecutionEngine/SectionMemoryManager.h
index fa1b2355528dd0b..a53a6e7e6a5c0aa 100644
--- a/llvm/include/llvm/ExecutionEngine/SectionMemoryManager.h
+++ b/llvm/include/llvm/ExecutionEngine/SectionMemoryManager.h
@@ -104,11 +104,22 @@ class SectionMemoryManager : public RTDyldMemoryManager {
   /// Creates a SectionMemoryManager instance with \p MM as the associated
   /// memory mapper.  If \p MM is nullptr then a default memory mapper is used
   /// that directly calls into the operating system.
-  SectionMemoryManager(MemoryMapper *MM = nullptr);
+  SectionMemoryManager(MemoryMapper *MM = nullptr, bool ReserveAlloc = false);
   SectionMemoryManager(const SectionMemoryManager &) = delete;
   void operator=(const SectionMemoryManager &) = delete;
   ~SectionMemoryManager() override;
 
+  /// Provides an option to reserveAllocationSpace and pre-allocate all memory
+  /// in a single block. This is required for ARM where ADRP instructions have a
+  /// limit of 4GB offsets. Large memory systems may allocate sections further
+  /// apart than this unless we pre-allocate.
+  bool needsToReserveAllocationSpace() override { return ReserveAllocation; }
+
+  /// Implements pre-allocating all memory in a single block.
+  void reserveAllocationSpace(uintptr_t CodeSize, uint32_t CodeAlign,
+      uintptr_t RODataSize, uint32_t RODataAlign,
+      uintptr_t RWDataSize, uint32_t RWDataAlign) override;
+
   /// Allocates a memory block of (at least) the given size suitable for
   /// executable code.
   ///
@@ -187,6 +198,7 @@ class SectionMemoryManager : public RTDyldMemoryManager {
   MemoryGroup RODataMem;
   MemoryMapper *MMapper;
   std::unique_ptr<MemoryMapper> OwnedMMapper;
+  bool ReserveAllocation;
 };
 
 } // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp b/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
index 436888730bfb246..073b38de11a41d5 100644
--- a/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
+++ b/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
@@ -18,6 +18,63 @@
 
 namespace llvm {
 
+static uintptr_t requiredPageSize(uintptr_t Size, uint32_t Alignment) {
+  static const size_t PageSize = sys::Process::getPageSize();
+  // Use the same calculation as allocateSection because we need to be able to satisfy it.
+  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1)/Alignment + 1);
+  // Round up to the nearest page size. Blocks must be page-aligned.
+  return PageSize * ((RequiredSize + PageSize - 1)/PageSize);
+}
+
+void SectionMemoryManager::reserveAllocationSpace(
+    uintptr_t CodeSize, uint32_t CodeAlign, uintptr_t RODataSize, uint32_t RODataAlign,
+    uintptr_t RWDataSize, uint32_t RWDataAlign) {
+  if (CodeSize == 0 && RODataSize == 0 && RWDataSize == 0) return;
+
+  // Get space required for each section.
+  CodeAlign = CodeAlign ? CodeAlign : 16;
+  RODataAlign = RODataAlign ? RODataAlign : 16;
+  RWDataAlign = RWDataAlign ? RWDataAlign : 16;
+  uintptr_t RequiredCodeSize = requiredPageSize(CodeSize, CodeAlign);
+  uintptr_t RequiredRODataSize = requiredPageSize(RODataSize, RODataAlign);
+  uintptr_t RequiredRWDataSize = requiredPageSize(RWDataSize, RWDataAlign);
+  uintptr_t RequiredSize = RequiredCodeSize + RequiredRODataSize + RequiredRWDataSize;
+
+  std::error_code ec;
+  sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(RequiredSize, nullptr,
+      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
+  if (ec) {
+    return;
+  }
+  // Request is page-aligned, so we should always get back exactly the request.
+  assert(MB.size() == RequiredSize);
+  // CodeMem will arbitrarily own this MemoryBlock to handle cleanup.
+  CodeMem.AllocatedMem.push_back(MB);
+  uintptr_t Addr = (uintptr_t)MB.base();
+  FreeMemBlock FreeMB;
+  FreeMB.PendingPrefixIndex = (unsigned)-1;
+
+  if (CodeSize > 0) {
+    assert(Addr == ((Addr + CodeAlign - 1) & ~(uintptr_t)(CodeAlign - 1)));
+    FreeMB.Free = sys::MemoryBlock((void*)Addr, RequiredCodeSize);
+    CodeMem.FreeMem.push_back(FreeMB);
+    Addr += RequiredCodeSize;
+  }
+
+  if (RODataSize > 0) {
+    assert(Addr == ((Addr + RODataAlign - 1) & ~(uintptr_t)(RODataAlign - 1)));
+    FreeMB.Free = sys::MemoryBlock((void*)Addr, RequiredRODataSize);
+    RODataMem.FreeMem.push_back(FreeMB);
+    Addr += RequiredRODataSize;
+  }
+
+  if (RWDataSize > 0) {
+    assert(Addr == ((Addr + RWDataAlign - 1) & ~(uintptr_t)(RWDataAlign - 1)));
+    FreeMB.Free = sys::MemoryBlock((void*)Addr, RequiredRWDataSize);
+    RWDataMem.FreeMem.push_back(FreeMB);
+  }
+}
+
 uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
                                                    unsigned Alignment,
                                                    unsigned SectionID,
@@ -91,6 +148,9 @@ uint8_t *SectionMemoryManager::allocateSection(
     }
   }
 
+  // All memory should be pre-allocated if needsToReserveAllocationSpace().
+  assert(!needsToReserveAllocationSpace());
+
   // No pre-allocated free block was large enough. Allocate a new memory region.
   // Note that all sections get allocated as read-write.  The permissions will
   // be updated later based on memory group.
@@ -265,8 +325,10 @@ class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
 };
 } // namespace
 
-SectionMemoryManager::SectionMemoryManager(MemoryMapper *UnownedMM)
-    : MMapper(UnownedMM), OwnedMMapper(nullptr) {
+SectionMemoryManager::SectionMemoryManager(MemoryMapper *UnownedMM,
+                                           bool ReserveAlloc)
+    : MMapper(UnownedMM), OwnedMMapper(nullptr),
+      ReserveAllocation(ReserveAlloc) {
   if (!MMapper) {
     OwnedMMapper = std::make_unique<DefaultMMapper>();
     MMapper = OwnedMMapper.get();



More information about the llvm-commits mailing list