[llvm] r355065 - [Memory] Add basic support for large/huge memory pages
Alexandre Ganea via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 27 18:47:34 PST 2019
Author: aganea
Date: Wed Feb 27 18:47:34 2019
New Revision: 355065
URL: http://llvm.org/viewvc/llvm-project?rev=355065&view=rev
Log:
[Memory] Add basic support for large/huge memory pages
This patch introduces Memory::MF_HUGE_HINT which indicates that allocateMappedMemory() shall return a pointer to a large memory page.
However the flag is a hint because we're not guaranteed in any way that we will get back a large memory page. There are several restrictions:
- Large/huge memory pages aren't enabled by default on modern OSes (Windows 10 and Linux at least), and should be manually enabled/reserved.
- Once enabled, it should be kept in mind that large pages are physical only, they can't be swapped.
- Memory fragmentation can affect the availability of large pages, especially after running the OS for a long time and/or running alongside many other applications.
Memory::allocateMappedMemory() will fall back to 4KB pages if it can't allocate 2MB large pages (if Memory::MF_HUGE_HINT is provided).
Currently, Memory::MF_HUGE_HINT only works on Windows. The hint will be ignored on Linux, 4KB pages will always be returned.
Differential Revision: https://reviews.llvm.org/D58718
Modified:
llvm/trunk/include/llvm/Support/Memory.h
llvm/trunk/lib/Support/Unix/Memory.inc
llvm/trunk/lib/Support/Windows/Memory.inc
llvm/trunk/unittests/Support/MemoryTest.cpp
Modified: llvm/trunk/include/llvm/Support/Memory.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Support/Memory.h?rev=355065&r1=355064&r2=355065&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Support/Memory.h (original)
+++ llvm/trunk/include/llvm/Support/Memory.h Wed Feb 27 18:47:34 2019
@@ -35,6 +35,7 @@ namespace sys {
private:
void *Address; ///< Address of first byte of memory area
size_t Size; ///< Size, in bytes of the memory area
+ unsigned Flags = 0;
friend class Memory;
};
@@ -45,9 +46,11 @@ namespace sys {
class Memory {
public:
enum ProtectionFlags {
- MF_READ = 0x1000000,
+ MF_READ = 0x1000000,
MF_WRITE = 0x2000000,
- MF_EXEC = 0x4000000
+ MF_EXEC = 0x4000000,
+ MF_RWE_MASK = 0x7000000,
+ MF_HUGE_HINT = 0x0000001
};
/// This method allocates a block of memory that is suitable for loading
Modified: llvm/trunk/lib/Support/Unix/Memory.inc
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Support/Unix/Memory.inc?rev=355065&r1=355064&r2=355065&view=diff
==============================================================================
--- llvm/trunk/lib/Support/Unix/Memory.inc (original)
+++ llvm/trunk/lib/Support/Unix/Memory.inc Wed Feb 27 18:47:34 2019
@@ -45,7 +45,7 @@ extern "C" void __clear_cache(void *, vo
namespace {
int getPosixProtectionFlags(unsigned Flags) {
- switch (Flags) {
+ switch (Flags & llvm::sys::Memory::MF_RWE_MASK) {
case llvm::sys::Memory::MF_READ:
return PROT_READ;
case llvm::sys::Memory::MF_WRITE:
@@ -114,6 +114,7 @@ Memory::allocateMappedMemory(size_t NumB
if (Start && Start % PageSize)
Start += PageSize - Start % PageSize;
+ // FIXME: Handle huge page requests (MF_HUGE_HINT).
void *Addr = ::mmap(reinterpret_cast<void *>(Start), NumBytes, Protect,
MMFlags, fd, 0);
if (Addr == MAP_FAILED) {
Modified: llvm/trunk/lib/Support/Windows/Memory.inc
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Support/Windows/Memory.inc?rev=355065&r1=355064&r2=355065&view=diff
==============================================================================
--- llvm/trunk/lib/Support/Windows/Memory.inc (original)
+++ llvm/trunk/lib/Support/Windows/Memory.inc Wed Feb 27 18:47:34 2019
@@ -22,7 +22,7 @@
namespace {
DWORD getWindowsProtectionFlags(unsigned Flags) {
- switch (Flags) {
+ switch (Flags & llvm::sys::Memory::MF_RWE_MASK) {
// Contrary to what you might expect, the Windows page protection flags
// are not a bitwise combination of RWX values
case llvm::sys::Memory::MF_READ:
@@ -47,6 +47,9 @@ DWORD getWindowsProtectionFlags(unsigned
return PAGE_NOACCESS;
}
+// While we'd be happy to allocate single pages, the Windows allocation
+// granularity may be larger than a single page (in practice, it is 64K)
+// so mapping less than that will create an unreachable fragment of memory.
size_t getAllocationGranularity() {
SYSTEM_INFO Info;
::GetSystemInfo(&Info);
@@ -56,6 +59,38 @@ size_t getAllocationGranularity() {
return Info.dwAllocationGranularity;
}
+// Large/huge memory pages need explicit process permissions in order to be
+// used. See https://blogs.msdn.microsoft.com/oldnewthing/20110128-00/?p=11643
+// Also large pages need to be manually enabled on your OS. If all this is
+// successful, we return the minimal large memory page size.
+static size_t enableProcessLargePages() {
+ HANDLE Token = 0;
+ size_t LargePageMin = GetLargePageMinimum();
+ if (LargePageMin)
+ OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY,
+ &Token);
+ if (!Token)
+ return 0;
+ LUID Luid;
+ if (!LookupPrivilegeValue(0, SE_LOCK_MEMORY_NAME, &Luid)) {
+ CloseHandle(Token);
+ return 0;
+ }
+ TOKEN_PRIVILEGES TP{};
+ TP.PrivilegeCount = 1;
+ TP.Privileges[0].Luid = Luid;
+ TP.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
+ if (!AdjustTokenPrivileges(Token, FALSE, &TP, 0, 0, 0)) {
+ CloseHandle(Token);
+ return 0;
+ }
+ DWORD E = GetLastError();
+ CloseHandle(Token);
+ if (E == ERROR_SUCCESS)
+ return LargePageMin;
+ return 0;
+}
+
} // namespace
namespace llvm {
@@ -74,19 +109,20 @@ MemoryBlock Memory::allocateMappedMemory
if (NumBytes == 0)
return MemoryBlock();
- // While we'd be happy to allocate single pages, the Windows allocation
- // granularity may be larger than a single page (in practice, it is 64K)
- // so mapping less than that will create an unreachable fragment of memory.
- // Avoid using one-time initialization of static locals here, since they
- // aren't thread safe with MSVC.
- static volatile size_t GranularityCached;
- size_t Granularity = GranularityCached;
- if (Granularity == 0) {
- Granularity = getAllocationGranularity();
- GranularityCached = Granularity;
+ static size_t DefaultGranularity = getAllocationGranularity();
+ static Optional<size_t> LargePageGranularity = enableProcessLargePages();
+
+ DWORD AllocType = MEM_RESERVE | MEM_COMMIT;
+ bool HugePages = false;
+ size_t Granularity = DefaultGranularity;
+
+ if ((Flags & MF_HUGE_HINT) && LargePageGranularity.hasValue()) {
+ AllocType |= MEM_LARGE_PAGES;
+ HugePages = true;
+ Granularity = *LargePageGranularity;
}
- const size_t NumBlocks = (NumBytes+Granularity-1)/Granularity;
+ size_t NumBlocks = (NumBytes + Granularity - 1) / Granularity;
uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
NearBlock->size()
@@ -99,13 +135,12 @@ MemoryBlock Memory::allocateMappedMemory
DWORD Protect = getWindowsProtectionFlags(Flags);
- void *PA = ::VirtualAlloc(reinterpret_cast<void*>(Start),
- NumBlocks*Granularity,
- MEM_RESERVE | MEM_COMMIT, Protect);
+ void *PA = ::VirtualAlloc(reinterpret_cast<void *>(Start),
+ NumBlocks * Granularity, AllocType, Protect);
if (PA == NULL) {
- if (NearBlock) {
- // Try again without the NearBlock hint
- return allocateMappedMemory(NumBytes, NULL, Flags, EC);
+ if (NearBlock || HugePages) {
+ // Try again without the NearBlock hint and without large memory pages
+ return allocateMappedMemory(NumBytes, NULL, Flags & ~MF_HUGE_HINT, EC);
}
EC = mapWindowsError(::GetLastError());
return MemoryBlock();
@@ -114,6 +149,7 @@ MemoryBlock Memory::allocateMappedMemory
MemoryBlock Result;
Result.Address = PA;
Result.Size = NumBlocks*Granularity;
+ Result.Flags = (Flags & ~MF_HUGE_HINT) | (HugePages ? MF_HUGE_HINT : 0);
if (Flags & MF_EXEC)
Memory::InvalidateInstructionCache(Result.Address, Result.Size);
Modified: llvm/trunk/unittests/Support/MemoryTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/Support/MemoryTest.cpp?rev=355065&r1=355064&r2=355065&view=diff
==============================================================================
--- llvm/trunk/unittests/Support/MemoryTest.cpp (original)
+++ llvm/trunk/unittests/Support/MemoryTest.cpp Wed Feb 27 18:47:34 2019
@@ -105,6 +105,22 @@ TEST_P(MappedMemoryTest, AllocAndRelease
EXPECT_FALSE(Memory::releaseMappedMemory(M1));
}
+TEST_P(MappedMemoryTest, AllocAndReleaseHuge) {
+ CHECK_UNSUPPORTED();
+ std::error_code EC;
+ MemoryBlock M1 = Memory::allocateMappedMemory(
+ sizeof(int), nullptr, Flags | Memory::MF_HUGE_HINT, EC);
+ EXPECT_EQ(std::error_code(), EC);
+
+  // Test large/huge memory pages. In the worst case, 4KB pages should be
+ // returned, if large pages aren't available.
+
+ EXPECT_NE((void *)nullptr, M1.base());
+ EXPECT_LE(sizeof(int), M1.size());
+
+ EXPECT_FALSE(Memory::releaseMappedMemory(M1));
+}
+
TEST_P(MappedMemoryTest, MultipleAllocAndRelease) {
CHECK_UNSUPPORTED();
std::error_code EC;
More information about the llvm-commits
mailing list