[compiler-rt] r347286 - [XRay] Add a test for allocator exhaustion
Dean Michael Berris via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 19 19:56:04 PST 2018
Author: dberris
Date: Mon Nov 19 19:56:04 2018
New Revision: 347286
URL: http://llvm.org/viewvc/llvm-project?rev=347286&view=rev
Log:
[XRay] Add a test for allocator exhaustion
Use a more representative test that allocates small chunks for
oddly-sized (small) objects from an allocator given only a page's worth
of memory.
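
For context, here is a minimal standalone sketch of the exhaustion scenario
the new test exercises, assuming the Allocator<N>, GetPageSizeCached(),
RoundUpTo(), and kCacheLineSize declarations from xray_allocator.h and
sanitizer_common are in scope. The helper name countBlocksUntilExhaustion is
made up for illustration; it is not part of this commit:

  #include "xray_allocator.h"

  namespace __xray {

  // An awkwardly sized payload: 12 bytes of fields, padded by the compiler.
  struct OddSizedData {
    s64 A;
    s32 B;
  };

  // Give the allocator exactly one page. Each block is padded out to a
  // cache line, so only
  //   GetPageSizeCached() / RoundUpTo(sizeof(OddSizedData), kCacheLineSize)
  // allocations should succeed before Allocate() returns a null Data pointer.
  inline size_t countBlocksUntilExhaustion() {
    Allocator<sizeof(OddSizedData)> A(GetPageSizeCached());
    size_t Count = 0;
    for (auto B = A.Allocate(); B.Data != nullptr; B = A.Allocate())
      ++Count;
    return Count;
  }

  } // namespace __xray
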
Modified:
compiler-rt/trunk/lib/xray/tests/unit/allocator_test.cc
compiler-rt/trunk/lib/xray/xray_allocator.h
Modified: compiler-rt/trunk/lib/xray/tests/unit/allocator_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/xray/tests/unit/allocator_test.cc?rev=347286&r1=347285&r2=347286&view=diff
==============================================================================
--- compiler-rt/trunk/lib/xray/tests/unit/allocator_test.cc (original)
+++ compiler-rt/trunk/lib/xray/tests/unit/allocator_test.cc Mon Nov 19 19:56:04 2018
@@ -33,10 +33,28 @@ TEST(AllocatorTest, Allocate) {
TEST(AllocatorTest, OverAllocate) {
Allocator<sizeof(TestData)> A(sizeof(TestData));
auto B1 = A.Allocate();
- (void)B1;
+ ASSERT_NE(B1.Data, nullptr);
auto B2 = A.Allocate();
ASSERT_EQ(B2.Data, nullptr);
}
+struct OddSizedData {
+ s64 A;
+ s32 B;
+};
+
+TEST(AllocatorTest, AllocateBoundaries) {
+ Allocator<sizeof(OddSizedData)> A(GetPageSizeCached());
+
+ // Keep allocating until we hit a nullptr block.
+ unsigned C = 0;
+ auto Expected =
+ GetPageSizeCached() / RoundUpTo(sizeof(OddSizedData), kCacheLineSize);
+ for (auto B = A.Allocate(); B.Data != nullptr; B = A.Allocate(), ++C)
+ ;
+
+ ASSERT_EQ(C, Expected);
+}
+
} // namespace
} // namespace __xray
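
For concreteness, assuming typical x86-64 values (a 4096-byte page and
64-byte cache lines; the test itself only relies on the values reported at
run time), the Expected count in AllocateBoundaries works out to 64 blocks:

  #include <cstddef>

  // Assumed values, for illustration only; the test reads the real numbers
  // from GetPageSizeCached() and kCacheLineSize at run time.
  constexpr std::size_t PageSize = 4096;  // assumed page size
  constexpr std::size_t CacheLine = 64;   // assumed cache line size
  constexpr std::size_t OddSize = 16;     // sizeof(OddSizedData) after padding
  // RoundUpTo(16, 64) == 64, so each block consumes a full cache line and
  // one page holds 4096 / 64 blocks.
  constexpr std::size_t BlockSize =
      ((OddSize + CacheLine - 1) / CacheLine) * CacheLine;
  static_assert(PageSize / BlockSize == 64, "expect 64 blocks per page");
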
Modified: compiler-rt/trunk/lib/xray/xray_allocator.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/xray/xray_allocator.h?rev=347286&r1=347285&r2=347286&view=diff
==============================================================================
--- compiler-rt/trunk/lib/xray/xray_allocator.h (original)
+++ compiler-rt/trunk/lib/xray/xray_allocator.h Mon Nov 19 19:56:04 2018
@@ -53,7 +53,8 @@ template <class T> void deallocate(T *B)
internal_munmap(B, RoundedSize);
}
-template <class T = uint8_t> T *allocateBuffer(size_t S) XRAY_NEVER_INSTRUMENT {
+template <class T = unsigned char>
+T *allocateBuffer(size_t S) XRAY_NEVER_INSTRUMENT {
uptr RoundedSize = RoundUpTo(S * sizeof(T), GetPageSizeCached());
uptr B = internal_mmap(NULL, RoundedSize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
@@ -111,8 +112,8 @@ template <size_t N> struct Allocator {
private:
const size_t MaxMemory{0};
- uint8_t *BackingStore = nullptr;
- uint8_t *AlignedNextBlock = nullptr;
+ unsigned char *BackingStore = nullptr;
+ unsigned char *AlignedNextBlock = nullptr;
size_t AllocatedBlocks = 0;
SpinMutex Mutex{};
@@ -141,7 +142,7 @@ private:
return nullptr;
}
- AlignedNextBlock = reinterpret_cast<uint8_t *>(AlignedNextBlockNum);
+ AlignedNextBlock = reinterpret_cast<unsigned char *>(AlignedNextBlockNum);
// Assert that AlignedNextBlock is cache-line aligned.
DCHECK_EQ(reinterpret_cast<uintptr_t>(AlignedNextBlock) % kCacheLineSize,
@@ -154,15 +155,15 @@ private:
// Align the pointer we'd like to return to an appropriate alignment, then
// advance the pointer from where to start allocations.
void *Result = AlignedNextBlock;
- AlignedNextBlock = reinterpret_cast<uint8_t *>(
- reinterpret_cast<uint8_t *>(AlignedNextBlock) + N);
+ AlignedNextBlock = reinterpret_cast<unsigned char *>(
+ reinterpret_cast<unsigned char *>(AlignedNextBlock) + N);
++AllocatedBlocks;
return Result;
}
public:
explicit Allocator(size_t M) XRAY_NEVER_INSTRUMENT
- : MaxMemory(nearest_boundary(M, kCacheLineSize)) {}
+ : MaxMemory(RoundUpTo(M, kCacheLineSize)) {}
Block Allocate() XRAY_NEVER_INSTRUMENT { return {Alloc()}; }