[llvm] Implement reserveAllocationSpace for SectionMemoryManager (PR #71968)

Graham Markall via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 17 11:58:21 PST 2023


================
@@ -18,6 +18,96 @@
 
 namespace llvm {
 
+bool SectionMemoryManager::hasSpace(const MemoryGroup &MemGroup,
+                                    uintptr_t Size) const {
+  for (const FreeMemBlock &FreeMB : MemGroup.FreeMem) {
+    if (FreeMB.Free.allocatedSize() >= Size)
+      return true;
+  }
+  return false;
+}
+
+void SectionMemoryManager::reserveAllocationSpace(
+    uintptr_t CodeSize, Align CodeAlign, uintptr_t RODataSize,
+    Align RODataAlign, uintptr_t RWDataSize, Align RWDataAlign) {
+  if (CodeSize == 0 && RODataSize == 0 && RWDataSize == 0)
+    return;
+
+  static const size_t PageSize = sys::Process::getPageSizeEstimate();
+
+  // Code alignment needs to be at least the stub alignment; however, we
+  // don't have an easy way to get that here, so as a workaround we assume
+  // it's 8, which is the largest value I observed across all platforms.
+  constexpr uint64_t StubAlign = 8;
+  CodeAlign = Align(std::max(CodeAlign.value(), StubAlign));
+  RODataAlign = Align(std::max(RODataAlign.value(), StubAlign));
+  RWDataAlign = Align(std::max(RWDataAlign.value(), StubAlign));
+
+  // Get space required for each section. Use the same calculation as
+  // allocateSection because we need to be able to satisfy it.
+  uint64_t RequiredCodeSize = alignTo(CodeSize, CodeAlign) + CodeAlign.value();
+  uint64_t RequiredRODataSize =
+      alignTo(RODataSize, RODataAlign) + RODataAlign.value();
+  uint64_t RequiredRWDataSize =
+      alignTo(RWDataSize, RWDataAlign) + RWDataAlign.value();
+
+  if (hasSpace(CodeMem, RequiredCodeSize) &&
+      hasSpace(RODataMem, RequiredRODataSize) &&
+      hasSpace(RWDataMem, RequiredRWDataSize)) {
+    // Sufficient space in contiguous block already available.
+    return;
+  }
+
+  // MemoryManager does not have functions for releasing memory after it's
+  // allocated. Normally it tries to use any excess blocks that were allocated
+  // due to page alignment, but if we have insufficient free memory for the
+  // request this can lead to allocating disparate memory that can violate the
+  // ARM ABI. Clear free memory so only the new allocations are used, but do
+  // not release allocated memory as it may still be in-use.
+  CodeMem.FreeMem.clear();
+  RODataMem.FreeMem.clear();
+  RWDataMem.FreeMem.clear();
+
+  // Round up to the nearest page size. Blocks must be page-aligned.
+  RequiredCodeSize = alignTo(RequiredCodeSize, PageSize);
+  RequiredRODataSize = alignTo(RequiredRODataSize, PageSize);
+  RequiredRWDataSize = alignTo(RequiredRWDataSize, PageSize);
----------------
gmarkall wrote:

In the sense that only the code alignment needs to be at least the stub alignment?

https://github.com/llvm/llvm-project/pull/71968
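
For readers following the calculation above: here is a minimal, self-contained sketch (not part of the patch; the helper name and the hard-coded stub alignment of 8 are illustrative) of the per-section size rule the reservation has to satisfy. It mirrors allocateSection, which aligns the requested size and then keeps one extra alignment's worth of headroom so the start of the block can be realigned:

    // Illustrative only: same formula as the patch's Required*Size values.
    #include "llvm/Support/Alignment.h"
    #include <algorithm>
    #include <cstdint>

    static uint64_t requiredSpace(uint64_t Size, llvm::Align SectionAlign,
                                  uint64_t StubAlign = 8) {
      // Bump the section alignment to the assumed stub alignment, as the
      // patch does, then align the size and add headroom for realignment.
      llvm::Align Effective(std::max(SectionAlign.value(), StubAlign));
      return llvm::alignTo(Size, Effective) + Effective.value();
    }

For example, requiredSpace(100, llvm::Align(16)) gives alignTo(100, 16) + 16 = 112 + 16 = 128 bytes, so a 100-byte section still fits after its start address is realigned within the reserved block.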


More information about the llvm-commits mailing list