[compiler-rt] r342745 - [XRay][compiler-rt] Update use of internal_mmap
Dean Michael Berris via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 21 09:34:42 PDT 2018
Author: dberris
Date: Fri Sep 21 09:34:42 2018
New Revision: 342745
URL: http://llvm.org/viewvc/llvm-project?rev=342745&view=rev
Log:
[XRay][compiler-rt] Update use of internal_mmap
Summary:
The implementation of `internal_mmap(...)` deviates from the contract of
`mmap(...)` -- i.e. instead of returning `MAP_FAILED` on failure, it
encodes the equivalent of an `errno` result in its return value. We update
how XRay uses `internal_mmap(...)` to check for and handle these error
conditions.
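For illustration, here is a minimal sketch of the checking pattern this
change adopts. It assumes the `sanitizer_common` internals behave as
described above (`internal_mmap` returning a `uptr`, `internal_iserror`
detecting the encoded error); `TryMap` is a hypothetical helper, not part
of the patch:

    // Declarations come from the sanitizer_common headers (the exact
    // header is platform-specific).
    #include "sanitizer_common/sanitizer_common.h"
    #include <sys/mman.h>  // PROT_*, MAP_*

    using namespace __sanitizer;

    // Hypothetical helper: on failure, internal_mmap() does not return
    // MAP_FAILED; internal_iserror() detects the encoded failure and
    // writes the errno-equivalent value into ErrNo.
    void *TryMap(uptr Size) {
      uptr Ret = internal_mmap(nullptr, Size, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      int ErrNo;
      if (internal_iserror(Ret, &ErrNo))
        return nullptr;  // ErrNo now holds the equivalent of errno.
      return reinterpret_cast<void *>(Ret);
    }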
In the process, we change the pointer types we use from `char*` to
`uint8_t*`, to prevent the pointers from accidentally being passed to
string library functions that expect to operate on `char*`.
We also take the chance to "promote" the sizes of individual
`internal_mmap` requests to at least a whole multiple of the page size,
consistent with the expectations of calls to `mmap`.
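As a sketch of that rounding, mirroring the `allocate`/`deallocate` pair in
the patch below (`allocateOne`/`deallocateOne` are hypothetical names; the
helpers `RoundUpTo` and `GetPageSizeCached` come from `sanitizer_common`):

    template <class T> T *allocateOne() {
      // Round the request up to a whole number of pages before mapping.
      uptr RoundedSize = RoundUpTo(sizeof(T), GetPageSizeCached());
      uptr B = internal_mmap(nullptr, RoundedSize, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      int ErrNo;
      if (internal_iserror(B, &ErrNo))
        return nullptr;
      return reinterpret_cast<T *>(B);
    }

    template <class T> void deallocateOne(T *B) {
      if (B == nullptr)
        return;
      // Recompute the same rounded size used at allocation time, so the
      // munmap covers exactly the extent that was mapped.
      internal_munmap(B, RoundUpTo(sizeof(T), GetPageSizeCached()));
    }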
Reviewers: cryptoad, mboerger
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D52361
Modified:
compiler-rt/trunk/lib/xray/xray_allocator.h
compiler-rt/trunk/lib/xray/xray_buffer_queue.h
compiler-rt/trunk/lib/xray/xray_fdr_logging.cc
compiler-rt/trunk/lib/xray/xray_profile_collector.cc
Modified: compiler-rt/trunk/lib/xray/xray_allocator.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/xray/xray_allocator.h?rev=342745&r1=342744&r2=342745&view=diff
==============================================================================
--- compiler-rt/trunk/lib/xray/xray_allocator.h (original)
+++ compiler-rt/trunk/lib/xray/xray_allocator.h Fri Sep 21 09:34:42 2018
@@ -32,13 +32,15 @@ namespace __xray {
// internal allocator. This allows us to manage the memory directly, using
// mmap'ed memory to back the allocators.
template <class T> T *allocate() XRAY_NEVER_INSTRUMENT {
- auto B = reinterpret_cast<void *>(
- internal_mmap(NULL, sizeof(T), PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
- if (B == MAP_FAILED) {
+ uptr RoundedSize = RoundUpTo(sizeof(T), GetPageSizeCached());
+ uptr B = internal_mmap(NULL, RoundedSize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ int ErrNo;
+ if (UNLIKELY(internal_iserror(B, &ErrNo))) {
if (Verbosity())
- Report("XRay Profiling: Failed to allocate memory of size %d.\n",
- sizeof(T));
+ Report(
+ "XRay Profiling: Failed to allocate memory of size %d; Error = %d.\n",
+ RoundedSize, B);
return nullptr;
}
return reinterpret_cast<T *>(B);
@@ -47,16 +49,20 @@ template <class T> T *allocate() XRAY_NE
template <class T> void deallocate(T *B) XRAY_NEVER_INSTRUMENT {
if (B == nullptr)
return;
- internal_munmap(B, sizeof(T));
+ uptr RoundedSize = RoundUpTo(sizeof(T), GetPageSizeCached());
+ internal_munmap(B, RoundedSize);
}
template <class T = uint8_t> T *allocateBuffer(size_t S) XRAY_NEVER_INSTRUMENT {
- auto B = reinterpret_cast<void *>(
- internal_mmap(NULL, S * sizeof(T), PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
- if (B == MAP_FAILED) {
+ uptr RoundedSize = RoundUpTo(S * sizeof(T), GetPageSizeCached());
+ uptr B = internal_mmap(NULL, RoundedSize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ int ErrNo;
+ if (UNLIKELY(internal_iserror(B, &ErrNo))) {
if (Verbosity())
- Report("XRay Profiling: Failed to allocate memory of size %d.\n", S);
+ Report(
+ "XRay Profiling: Failed to allocate memory of size %d; Error = %d.\n",
+ RoundedSize, B);
return nullptr;
}
return reinterpret_cast<T *>(B);
@@ -65,7 +71,8 @@ template <class T = uint8_t> T *allocate
template <class T> void deallocateBuffer(T *B, size_t S) XRAY_NEVER_INSTRUMENT {
if (B == nullptr)
return;
- internal_munmap(B, S);
+ uptr RoundedSize = RoundUpTo(S * sizeof(T), GetPageSizeCached());
+ internal_munmap(B, RoundedSize);
}
template <class T, class... U>
@@ -104,19 +111,16 @@ template <size_t N> struct Allocator {
private:
const size_t MaxMemory{0};
- void *BackingStore = nullptr;
- void *AlignedNextBlock = nullptr;
+ uint8_t *BackingStore = nullptr;
+ uint8_t *AlignedNextBlock = nullptr;
size_t AllocatedBlocks = 0;
SpinMutex Mutex{};
void *Alloc() XRAY_NEVER_INSTRUMENT {
SpinMutexLock Lock(&Mutex);
if (UNLIKELY(BackingStore == nullptr)) {
- BackingStore = reinterpret_cast<void *>(
- internal_mmap(NULL, MaxMemory, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
- if (BackingStore == MAP_FAILED) {
- BackingStore = nullptr;
+ BackingStore = allocateBuffer(MaxMemory);
+ if (BackingStore == nullptr) {
if (Verbosity())
Report("XRay Profiling: Failed to allocate memory for allocator.\n");
return nullptr;
@@ -129,7 +133,7 @@ private:
auto AlignedNextBlockNum = nearest_boundary(
reinterpret_cast<uintptr_t>(AlignedNextBlock), kCacheLineSize);
if (diff(AlignedNextBlockNum, BackingStoreNum) > ptrdiff_t(MaxMemory)) {
- munmap(BackingStore, MaxMemory);
+ deallocateBuffer(BackingStore, MaxMemory);
AlignedNextBlock = BackingStore = nullptr;
if (Verbosity())
Report("XRay Profiling: Cannot obtain enough memory from "
@@ -137,7 +141,7 @@ private:
return nullptr;
}
- AlignedNextBlock = reinterpret_cast<void *>(AlignedNextBlockNum);
+ AlignedNextBlock = reinterpret_cast<uint8_t *>(AlignedNextBlockNum);
// Assert that AlignedNextBlock is cache-line aligned.
DCHECK_EQ(reinterpret_cast<uintptr_t>(AlignedNextBlock) % kCacheLineSize,
@@ -150,8 +154,8 @@ private:
// Align the pointer we'd like to return to an appropriate alignment, then
// advance the pointer from where to start allocations.
void *Result = AlignedNextBlock;
- AlignedNextBlock = reinterpret_cast<void *>(
- reinterpret_cast<char *>(AlignedNextBlock) + N);
+ AlignedNextBlock = reinterpret_cast<uint8_t *>(
+ reinterpret_cast<uint8_t *>(AlignedNextBlock) + N);
++AllocatedBlocks;
return Result;
}
@@ -164,7 +168,7 @@ public:
~Allocator() NOEXCEPT XRAY_NEVER_INSTRUMENT {
if (BackingStore != nullptr) {
- internal_munmap(BackingStore, MaxMemory);
+ deallocateBuffer(BackingStore, MaxMemory);
}
}
};
Modified: compiler-rt/trunk/lib/xray/xray_buffer_queue.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/xray/xray_buffer_queue.h?rev=342745&r1=342744&r2=342745&view=diff
==============================================================================
--- compiler-rt/trunk/lib/xray/xray_buffer_queue.h (original)
+++ compiler-rt/trunk/lib/xray/xray_buffer_queue.h Fri Sep 21 09:34:42 2018
@@ -20,6 +20,7 @@
#include "sanitizer_common/sanitizer_mutex.h"
#include "xray_defs.h"
#include <cstddef>
+#include <cstdint>
namespace __xray {
@@ -114,7 +115,7 @@ private:
// A pointer to a contiguous block of memory to serve as the backing store for
// all the individual buffers handed out.
- void *BackingStore;
+ uint8_t *BackingStore;
// A dynamically allocated array of BufferRep instances.
BufferRep *Buffers;
Modified: compiler-rt/trunk/lib/xray/xray_fdr_logging.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/xray/xray_fdr_logging.cc?rev=342745&r1=342744&r2=342745&view=diff
==============================================================================
--- compiler-rt/trunk/lib/xray/xray_fdr_logging.cc (original)
+++ compiler-rt/trunk/lib/xray/xray_fdr_logging.cc Fri Sep 21 09:34:42 2018
@@ -721,7 +721,7 @@ XRayBuffer fdrIterator(const XRayBuffer
static BufferQueue::const_iterator It{};
static BufferQueue::const_iterator End{};
- static void *CurrentBuffer{nullptr};
+ static uint8_t *CurrentBuffer{nullptr};
static size_t SerializedBufferSize = 0;
if (B.Data == static_cast<void *>(&Header) && B.Size == sizeof(Header)) {
// From this point on, we provide raw access to the raw buffer we're getting
Modified: compiler-rt/trunk/lib/xray/xray_profile_collector.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/xray/xray_profile_collector.cc?rev=342745&r1=342744&r2=342745&view=diff
==============================================================================
--- compiler-rt/trunk/lib/xray/xray_profile_collector.cc (original)
+++ compiler-rt/trunk/lib/xray/xray_profile_collector.cc Fri Sep 21 09:34:42 2018
@@ -162,34 +162,34 @@ populateRecords(ProfileRecordArray &PRs,
static void serializeRecords(ProfileBuffer *Buffer, const BlockHeader &Header,
const ProfileRecordArray &ProfileRecords)
XRAY_NEVER_INSTRUMENT {
- auto NextPtr = static_cast<char *>(
+ auto NextPtr = static_cast<uint8_t *>(
internal_memcpy(Buffer->Data, &Header, sizeof(Header))) +
sizeof(Header);
for (const auto &Record : ProfileRecords) {
// List of IDs follow:
for (const auto FId : Record.Path)
NextPtr =
- static_cast<char *>(internal_memcpy(NextPtr, &FId, sizeof(FId))) +
+ static_cast<uint8_t *>(internal_memcpy(NextPtr, &FId, sizeof(FId))) +
sizeof(FId);
// Add the sentinel here.
constexpr int32_t SentinelFId = 0;
- NextPtr = static_cast<char *>(
+ NextPtr = static_cast<uint8_t *>(
internal_memset(NextPtr, SentinelFId, sizeof(SentinelFId))) +
sizeof(SentinelFId);
// Add the node data here.
NextPtr =
- static_cast<char *>(internal_memcpy(NextPtr, &Record.Node->CallCount,
- sizeof(Record.Node->CallCount))) +
+ static_cast<uint8_t *>(internal_memcpy(
+ NextPtr, &Record.Node->CallCount, sizeof(Record.Node->CallCount))) +
sizeof(Record.Node->CallCount);
- NextPtr = static_cast<char *>(
+ NextPtr = static_cast<uint8_t *>(
internal_memcpy(NextPtr, &Record.Node->CumulativeLocalTime,
sizeof(Record.Node->CumulativeLocalTime))) +
sizeof(Record.Node->CumulativeLocalTime);
}
- DCHECK_EQ(NextPtr - static_cast<char *>(Buffer->Data), Buffer->Size);
+ DCHECK_EQ(NextPtr - static_cast<uint8_t *>(Buffer->Data), Buffer->Size);
}
} // namespace
@@ -203,7 +203,7 @@ void serialize() XRAY_NEVER_INSTRUMENT {
// Clear out the global ProfileBuffers, if it's not empty.
for (auto &B : *ProfileBuffers)
- deallocateBuffer(B.Data, B.Size);
+ deallocateBuffer(reinterpret_cast<uint8_t *>(B.Data), B.Size);
ProfileBuffers->trim(ProfileBuffers->size());
if (ThreadTries->empty())
@@ -259,7 +259,7 @@ void reset() XRAY_NEVER_INSTRUMENT {
if (ProfileBuffers != nullptr) {
// Clear out the profile buffers that have been serialized.
for (auto &B : *ProfileBuffers)
- deallocateBuffer(B.Data, B.Size);
+ deallocateBuffer(reinterpret_cast<uint8_t *>(B.Data), B.Size);
ProfileBuffers->trim(ProfileBuffers->size());
}