[llvm] Implement reserveAllocationSpace for SectionMemoryManager (PR #71968)
Michael Smith via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 10 15:02:29 PST 2023
https://github.com/MikaelSmith updated https://github.com/llvm/llvm-project/pull/71968
>From c04dc5a625e06fc87e708489a28cd5b2c4f4a5d6 Mon Sep 17 00:00:00 2001
From: Michael Smith <michael.smith at cloudera.com>
Date: Fri, 10 Nov 2023 09:48:28 -0800
Subject: [PATCH] Implement reserveAllocationSpace for SectionMemoryManager
Implements `reserveAllocationSpace` and provides an option to enable
`needsToReserveAllocationSpace` for large-memory environments on
AArch64.
The [AArch64 ABI](https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst#7code-models)
restricts the distance between the TEXT and GOT sections because the
instructions that reference them can only encode offsets of 2 or 4 GB.
Allocating sections in multiple blocks can place them farther apart than
that on systems with lots of memory. In those environments, several
projects using SectionMemoryManager with MCJIT have hit assertion
failures for the R_AARCH64_ADR_PREL_PG_HI21 relocation when it tries to
address across a distance greater than 2 GB (the range of a signed
32-bit offset).
Fixes #71963 by allocating all sections in a single contiguous block,
bounding the distance that instruction offsets must span, much as the
sections of a pre-compiled binary are laid out when loaded into memory.
TODO: add tests to MCJITMemoryManagerTest.
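For reference, a minimal usage sketch (not part of this patch) of how a
client could opt in via MCJIT's EngineBuilder, assuming the new
ReserveAlloc constructor parameter added below:

```cpp
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/IR/Module.h"
#include <memory>

llvm::ExecutionEngine *createEngine(std::unique_ptr<llvm::Module> M) {
  // Reserve one contiguous block up front so all sections stay within
  // ADRP range of each other on AArch64.
  auto MemMgr = std::make_unique<llvm::SectionMemoryManager>(
      /*MM=*/nullptr, /*ReserveAlloc=*/true);
  return llvm::EngineBuilder(std::move(M))
      .setMCJITMemoryManager(std::move(MemMgr))
      .create();
}
```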
---
.../ExecutionEngine/SectionMemoryManager.h | 14 +++-
.../ExecutionEngine/SectionMemoryManager.cpp | 66 ++++++++++++++++++-
2 files changed, 77 insertions(+), 3 deletions(-)
diff --git a/llvm/include/llvm/ExecutionEngine/SectionMemoryManager.h b/llvm/include/llvm/ExecutionEngine/SectionMemoryManager.h
index fa1b2355528dd0b..4596b69855796b1 100644
--- a/llvm/include/llvm/ExecutionEngine/SectionMemoryManager.h
+++ b/llvm/include/llvm/ExecutionEngine/SectionMemoryManager.h
@@ -104,11 +104,22 @@ class SectionMemoryManager : public RTDyldMemoryManager {
/// Creates a SectionMemoryManager instance with \p MM as the associated
/// memory mapper. If \p MM is nullptr then a default memory mapper is used
/// that directly calls into the operating system.
- SectionMemoryManager(MemoryMapper *MM = nullptr);
+ SectionMemoryManager(MemoryMapper *MM = nullptr, bool ReserveAlloc = false);
SectionMemoryManager(const SectionMemoryManager &) = delete;
void operator=(const SectionMemoryManager &) = delete;
~SectionMemoryManager() override;
+  /// Enables reserveAllocationSpace so that all memory is pre-allocated
+  /// in a single block. This is needed on AArch64, where ADRP instructions
+  /// can only address within +/-4 GB; large-memory systems may otherwise
+  /// place sections further apart than that.
+ bool needsToReserveAllocationSpace() override { return ReserveAllocation; }
+
+ /// Implements pre-allocating all memory in a single block.
+ void reserveAllocationSpace(uintptr_t CodeSize, Align CodeAlign,
+ uintptr_t RODataSize, Align RODataAlign,
+ uintptr_t RWDataSize, Align RWDataAlign) override;
+
/// Allocates a memory block of (at least) the given size suitable for
/// executable code.
///
@@ -187,6 +198,7 @@ class SectionMemoryManager : public RTDyldMemoryManager {
MemoryGroup RODataMem;
MemoryMapper *MMapper;
std::unique_ptr<MemoryMapper> OwnedMMapper;
+ bool ReserveAllocation;
};
} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp b/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
index 436888730bfb246..a2c45bd1d5021d3 100644
--- a/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
+++ b/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
@@ -18,6 +18,63 @@
namespace llvm {
+static uint64_t requiredPageSize(uintptr_t Size, Align Alignment) {
+ static const size_t PageSize = sys::Process::getPageSizeEstimate();
+ // Use the same calculation as allocateSection because we need to be able to
+ // satisfy it.
+ uint64_t RequiredSize = alignTo(Size, Alignment);
+ // Round up to the nearest page size. Blocks must be page-aligned.
+ return PageSize * ((RequiredSize + PageSize - 1) / PageSize);
+}
+
+void SectionMemoryManager::reserveAllocationSpace(
+ uintptr_t CodeSize, Align CodeAlign, uintptr_t RODataSize,
+ Align RODataAlign, uintptr_t RWDataSize, Align RWDataAlign) {
+ if (CodeSize == 0 && RODataSize == 0 && RWDataSize == 0)
+ return;
+
+ // Get space required for each section.
+ uint64_t RequiredCodeSize = requiredPageSize(CodeSize, CodeAlign);
+ uint64_t RequiredRODataSize = requiredPageSize(RODataSize, RODataAlign);
+ uint64_t RequiredRWDataSize = requiredPageSize(RWDataSize, RWDataAlign);
+ uint64_t RequiredSize =
+ RequiredCodeSize + RequiredRODataSize + RequiredRWDataSize;
+
+ std::error_code ec;
+ sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
+ RequiredSize, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
+ if (ec) {
+ return;
+ }
+ // Request is page-aligned, so we should always get back exactly the request.
+ assert(MB.allocatedSize() == RequiredSize);
+ // CodeMem will arbitrarily own this MemoryBlock to handle cleanup.
+ CodeMem.AllocatedMem.push_back(MB);
+ uintptr_t Addr = (uintptr_t)MB.base();
+ FreeMemBlock FreeMB;
+ FreeMB.PendingPrefixIndex = (unsigned)-1;
+
+ if (CodeSize > 0) {
+ assert(isAddrAligned(CodeAlign, (void *)Addr));
+ FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredCodeSize);
+ CodeMem.FreeMem.push_back(FreeMB);
+ Addr += RequiredCodeSize;
+ }
+
+ if (RODataSize > 0) {
+ assert(isAddrAligned(RODataAlign, (void *)Addr));
+ FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRODataSize);
+ RODataMem.FreeMem.push_back(FreeMB);
+ Addr += RequiredRODataSize;
+ }
+
+ if (RWDataSize > 0) {
+ assert(isAddrAligned(RWDataAlign, (void *)Addr));
+ FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRWDataSize);
+ RWDataMem.FreeMem.push_back(FreeMB);
+ }
+}
+
uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
unsigned Alignment,
unsigned SectionID,
@@ -91,6 +148,9 @@ uint8_t *SectionMemoryManager::allocateSection(
}
}
+ // All memory should be pre-allocated if needsToReserveAllocationSpace().
+ assert(!needsToReserveAllocationSpace());
+
// No pre-allocated free block was large enough. Allocate a new memory region.
// Note that all sections get allocated as read-write. The permissions will
// be updated later based on memory group.
@@ -265,8 +325,10 @@ class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
};
} // namespace
-SectionMemoryManager::SectionMemoryManager(MemoryMapper *UnownedMM)
- : MMapper(UnownedMM), OwnedMMapper(nullptr) {
+SectionMemoryManager::SectionMemoryManager(MemoryMapper *UnownedMM,
+ bool ReserveAlloc)
+ : MMapper(UnownedMM), OwnedMMapper(nullptr),
+ ReserveAllocation(ReserveAlloc) {
if (!MMapper) {
OwnedMMapper = std::make_unique<DefaultMMapper>();
MMapper = OwnedMMapper.get();
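For illustration, here is a self-contained sketch (not part of the patch)
of the per-section size computation that requiredPageSize performs above,
using plain integer arithmetic in place of llvm::alignTo and
sys::Process::getPageSizeEstimate(); the 4 KiB page size is an assumption:

```cpp
#include <cstdint>
#include <cstdio>

// Round Size up to Alignment, then up to a whole number of pages, mirroring
// the calculation reserveAllocationSpace uses for each section.
static uint64_t requiredPageSize(uint64_t Size, uint64_t Alignment,
                                 uint64_t PageSize) {
  uint64_t RequiredSize = (Size + Alignment - 1) / Alignment * Alignment;
  return PageSize * ((RequiredSize + PageSize - 1) / PageSize);
}

int main() {
  // 10752 bytes of code with 16-byte alignment on 4 KiB pages rounds up to
  // three pages (12288 bytes); the three per-section results are then summed
  // to size the single contiguous mapping.
  std::printf("%llu\n",
              (unsigned long long)requiredPageSize(10752, 16, 4096));
  return 0;
}
```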