[compiler-rt] a926977 - [scudo] Improve the uses of roundUpTo/roundDownTo/isAligned
Chia-hung Duan via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 15 15:46:49 PST 2023
Author: Chia-hung Duan
Date: 2023-02-15T23:44:44Z
New Revision: a9269773eb94ddd36ec3e624740013ca3a5885ba
URL: https://github.com/llvm/llvm-project/commit/a9269773eb94ddd36ec3e624740013ca3a5885ba
DIFF: https://github.com/llvm/llvm-project/commit/a9269773eb94ddd36ec3e624740013ca3a5885ba.diff
LOG: [scudo] Improve the uses of roundUpTo/roundDownTo/isAligned
The implementations of those functions require the rounding target to be a
power of two; add a debugging check to catch misuse.
Besides, add general versions of those three (roundUpSlow, roundDownSlow,
isAlignedSlow) to accommodate non-power-of-two cases.
Also rename roundUpTo/roundDownTo to roundUp/roundDown (and
roundUpToPowerOfTwo to roundUpPowerOfTwo).
Reviewed By: cferris, cryptoad
Differential Revision: https://reviews.llvm.org/D142658
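For context, a minimal standalone sketch of the distinction the patch draws,
with plain assert standing in for scudo's DCHECK and uintptr_t for uptr; the
helper names match the new common.h, while main and the sample values are
illustrative only:

#include <cassert>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

constexpr bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }

// Fast variants: Boundary - 1 is an all-ones mask only when Boundary is a
// power of two, so the check catches silently wrong results in debug builds.
constexpr uptr roundUp(uptr X, uptr Boundary) {
  assert(isPowerOfTwo(Boundary));
  return (X + Boundary - 1) & ~(Boundary - 1);
}
constexpr uptr roundDown(uptr X, uptr Boundary) {
  assert(isPowerOfTwo(Boundary));
  return X & ~(Boundary - 1);
}

// General ("Slow") variants: correct for any non-zero Boundary, at the cost
// of an integer division instead of a bit mask.
constexpr uptr roundUpSlow(uptr X, uptr Boundary) {
  return ((X + Boundary - 1) / Boundary) * Boundary;
}
constexpr uptr roundDownSlow(uptr X, uptr Boundary) {
  return (X / Boundary) * Boundary;
}

int main() {
  printf("%llu\n", (unsigned long long)roundUp(13, 8));     // 16
  printf("%llu\n", (unsigned long long)roundUpSlow(13, 8)); // 16: they agree
  printf("%llu\n", (unsigned long long)roundUpSlow(8, 6));  // 12
  // roundUp(8, 6) would compute (8 + 5) & ~5 == 8, a wrong answer the old
  // code accepted silently; the new DCHECK rejects it in debug builds.
  return 0;
}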
Added:
Modified:
compiler-rt/lib/scudo/standalone/chunk.h
compiler-rt/lib/scudo/standalone/combined.h
compiler-rt/lib/scudo/standalone/common.h
compiler-rt/lib/scudo/standalone/primary32.h
compiler-rt/lib/scudo/standalone/primary64.h
compiler-rt/lib/scudo/standalone/release.h
compiler-rt/lib/scudo/standalone/secondary.h
compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
compiler-rt/lib/scudo/standalone/tests/release_test.cpp
compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
compiler-rt/lib/scudo/standalone/trusty.cpp
compiler-rt/lib/scudo/standalone/vector.h
compiler-rt/lib/scudo/standalone/wrappers_c.inc
compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
Removed:
################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/chunk.h b/compiler-rt/lib/scudo/standalone/chunk.h
index 88bada8c2d19..32874a8df642 100644
--- a/compiler-rt/lib/scudo/standalone/chunk.h
+++ b/compiler-rt/lib/scudo/standalone/chunk.h
@@ -85,7 +85,7 @@ constexpr uptr OffsetMask = (1UL << 16) - 1;
constexpr uptr ChecksumMask = (1UL << 16) - 1;
constexpr uptr getHeaderSize() {
- return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+ return roundUp(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
}
inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index c3079e23c795..52dbf6b14526 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -342,7 +342,7 @@ class Allocator {
// to be sure that there will be an address in the block that will satisfy
// the alignment.
const uptr NeededSize =
- roundUpTo(Size, MinAlignment) +
+ roundUp(Size, MinAlignment) +
((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
// Takes care of extravagantly large sizes as well as integer overflows.
@@ -402,7 +402,7 @@ class Allocator {
const uptr BlockUptr = reinterpret_cast<uptr>(Block);
const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize();
- const uptr UserPtr = roundUpTo(UnalignedUserPtr, Alignment);
+ const uptr UserPtr = roundUp(UnalignedUserPtr, Alignment);
void *Ptr = reinterpret_cast<void *>(UserPtr);
void *TaggedPtr = Ptr;
@@ -461,7 +461,7 @@ class Allocator {
PrevUserPtr == UserPtr &&
(TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
- const uptr NextPage = roundUpTo(TaggedUserPtr, getPageSizeCached());
+ const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
PrevEnd = NextPage;
TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
@@ -474,8 +474,8 @@ class Allocator {
// was freed, it would not have been retagged and thus zeroed, and
// therefore it needs to be zeroed now.
memset(TaggedPtr, 0,
- Min(Size, roundUpTo(PrevEnd - TaggedUserPtr,
- archMemoryTagGranuleSize())));
+ Min(Size, roundUp(PrevEnd - TaggedUserPtr,
+ archMemoryTagGranuleSize())));
} else if (Size) {
// Clear any stack metadata that may have previously been stored in
// the chunk data.
@@ -1241,15 +1241,15 @@ class Allocator {
void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
uptr BlockEnd) {
- uptr RoundOldPtr = roundUpTo(OldPtr, archMemoryTagGranuleSize());
+ uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
uptr RoundNewPtr;
if (RoundOldPtr >= NewPtr) {
// If the allocation is shrinking we just need to set the tag past the end
// of the allocation to 0. See explanation in storeEndMarker() above.
- RoundNewPtr = roundUpTo(NewPtr, archMemoryTagGranuleSize());
+ RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
} else {
// Set the memory tag of the region
- // [RoundOldPtr, roundUpTo(NewPtr, archMemoryTagGranuleSize()))
+ // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
// to the pointer tag stored in OldPtr.
RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
}
@@ -1505,7 +1505,8 @@ class Allocator {
MapPlatformData Data = {};
RawRingBuffer = static_cast<char *>(
map(/*Addr=*/nullptr,
- roundUpTo(ringBufferSizeInBytes(AllocationRingBufferSize), getPageSizeCached()),
+ roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
+ getPageSizeCached()),
"AllocatorRingBuffer", /*Flags=*/0, &Data));
auto *RingBuffer = reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
RingBuffer->Size = AllocationRingBufferSize;
diff --git a/compiler-rt/lib/scudo/standalone/common.h b/compiler-rt/lib/scudo/standalone/common.h
index 2ec9a630359a..aa15e9e787e2 100644
--- a/compiler-rt/lib/scudo/standalone/common.h
+++ b/compiler-rt/lib/scudo/standalone/common.h
@@ -27,17 +27,31 @@ template <class Dest, class Source> inline Dest bit_cast(const Source &S) {
return D;
}
-inline constexpr uptr roundUpTo(uptr X, uptr Boundary) {
+inline constexpr bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
+
+inline constexpr uptr roundUp(uptr X, uptr Boundary) {
+ DCHECK(isPowerOfTwo(Boundary));
return (X + Boundary - 1) & ~(Boundary - 1);
}
+inline constexpr uptr roundUpSlow(uptr X, uptr Boundary) {
+ return ((X + Boundary - 1) / Boundary) * Boundary;
+}
-inline constexpr uptr roundDownTo(uptr X, uptr Boundary) {
+inline constexpr uptr roundDown(uptr X, uptr Boundary) {
+ DCHECK(isPowerOfTwo(Boundary));
return X & ~(Boundary - 1);
}
+inline constexpr uptr roundDownSlow(uptr X, uptr Boundary) {
+ return (X / Boundary) * Boundary;
+}
inline constexpr bool isAligned(uptr X, uptr Alignment) {
+ DCHECK(isPowerOfTwo(Alignment));
return (X & (Alignment - 1)) == 0;
}
+inline constexpr bool isAlignedSlow(uptr X, uptr Alignment) {
+ return X % Alignment == 0;
+}
template <class T> constexpr T Min(T A, T B) { return A < B ? A : B; }
@@ -49,14 +63,12 @@ template <class T> void Swap(T &A, T &B) {
B = Tmp;
}
-inline bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
-
inline uptr getMostSignificantSetBitIndex(uptr X) {
DCHECK_NE(X, 0U);
return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
}
-inline uptr roundUpToPowerOfTwo(uptr Size) {
+inline uptr roundUpPowerOfTwo(uptr Size) {
DCHECK(Size);
if (isPowerOfTwo(Size))
return Size;
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 2a204ca40f96..8e7682f058fa 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -341,7 +341,7 @@ template <typename Config> class SizeClassAllocator32 {
else
MapSize = RegionSize;
} else {
- Region = roundUpTo(MapBase, RegionSize);
+ Region = roundUp(MapBase, RegionSize);
unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
MapSize = RegionSize;
}
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 3c53c03a6d19..650c28687ad6 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -55,7 +55,7 @@ template <typename Config> class SizeClassAllocator64 {
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
- ? roundUpTo(sizeof(TransferBatch), 1U << CompactPtrScale)
+ ? roundUp(sizeof(TransferBatch), 1U << CompactPtrScale)
: SizeClassMap::getSizeByClassId(ClassId);
}
@@ -638,7 +638,7 @@ template <typename Config> class SizeClassAllocator64 {
if (TotalUserBytes > MappedUser) {
// Do the mmap for the user memory.
const uptr MapSize =
- roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
+ roundUp(TotalUserBytes - MappedUser, MapSizeIncrement);
const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
if (UNLIKELY(RegionBase + MappedUser + MapSize > RegionSize)) {
Region->Exhausted = true;
diff --git a/compiler-rt/lib/scudo/standalone/release.h b/compiler-rt/lib/scudo/standalone/release.h
index 1e831a660092..7e5738c0985c 100644
--- a/compiler-rt/lib/scudo/standalone/release.h
+++ b/compiler-rt/lib/scudo/standalone/release.h
@@ -73,7 +73,7 @@ class RegionPageMap {
Mutex.unlock();
else
unmap(reinterpret_cast<void *>(Buffer),
- roundUpTo(BufferSize, getPageSizeCached()));
+ roundUp(BufferSize, getPageSizeCached()));
Buffer = nullptr;
}
@@ -94,7 +94,7 @@ class RegionPageMap {
// Rounding counter storage size up to the power of two allows for using
// bit shifts calculating particular counter's Index and offset.
const uptr CounterSizeBits =
- roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
+ roundUpPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
DCHECK_LE(CounterSizeBits, MaxCounterBits);
CounterSizeBitsLog = getLog2(CounterSizeBits);
CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);
@@ -105,7 +105,7 @@ class RegionPageMap {
BitOffsetMask = PackingRatio - 1;
SizePerRegion =
- roundUpTo(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
+ roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
PackingRatioLog;
BufferSize = SizePerRegion * sizeof(*Buffer) * Regions;
if (BufferSize <= (StaticBufferCount * sizeof(Buffer[0])) &&
@@ -120,7 +120,7 @@ class RegionPageMap {
const uptr MmapFlags =
MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
Buffer = reinterpret_cast<uptr *>(
- map(nullptr, roundUpTo(BufferSize, getPageSizeCached()),
+ map(nullptr, roundUp(BufferSize, getPageSizeCached()),
"scudo:counters", MmapFlags, &MapData));
}
}
@@ -266,7 +266,7 @@ struct PageReleaseContext {
}
}
- PagesCount = roundUpTo(RegionSize, PageSize) / PageSize;
+ PagesCount = roundUp(RegionSize, PageSize) / PageSize;
PageSizeLog = getLog2(PageSize);
RoundedRegionSize = PagesCount << PageSizeLog;
RoundedSize = NumberOfRegions * RoundedRegionSize;
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 3a11c4778862..b31288772226 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -244,7 +244,7 @@ template <typename Config> class MapAllocatorCache {
continue;
const uptr CommitSize = Entries[I].CommitSize;
const uptr AllocPos =
- roundDownTo(CommitBase + CommitSize - Size, Alignment);
+ roundDown(CommitBase + CommitSize - Size, Alignment);
HeaderPos =
AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
if (HeaderPos > CommitBase + CommitSize)
@@ -510,9 +510,9 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
const uptr PageSize = getPageSizeCached();
uptr RoundedSize =
- roundUpTo(roundUpTo(Size, Alignment) + LargeBlock::getHeaderSize() +
- Chunk::getHeaderSize(),
- PageSize);
+ roundUp(roundUp(Size, Alignment) + LargeBlock::getHeaderSize() +
+ Chunk::getHeaderSize(),
+ PageSize);
if (Alignment > PageSize)
RoundedSize += Alignment - PageSize;
@@ -559,7 +559,7 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
// For alignments greater than or equal to a page, the user pointer (eg: the
// pointer that is returned by the C or C++ allocation APIs) ends up on a
// page boundary , and our headers will live in the preceding page.
- CommitBase = roundUpTo(MapBase + PageSize + 1, Alignment) - PageSize;
+ CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
const uptr NewMapBase = CommitBase - PageSize;
DCHECK_GE(NewMapBase, MapBase);
// We only trim the extra memory on 32-bit platforms: 64-bit platforms
@@ -569,7 +569,7 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
MapBase = NewMapBase;
}
const uptr NewMapEnd =
- CommitBase + PageSize + roundUpTo(Size, PageSize) + PageSize;
+ CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
DCHECK_LE(NewMapEnd, MapEnd);
if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
@@ -578,7 +578,7 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
}
const uptr CommitSize = MapEnd - PageSize - CommitBase;
- const uptr AllocPos = roundDownTo(CommitBase + CommitSize - Size, Alignment);
+ const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, &Data);
const uptr HeaderPos =
AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index a79469fa8198..6f4fa748ed93 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -39,7 +39,7 @@ bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
if (Alignment < MinAlignment)
Alignment = MinAlignment;
const scudo::uptr NeededSize =
- scudo::roundUpTo(Size, MinAlignment) +
+ scudo::roundUp(Size, MinAlignment) +
((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
return AllocatorT::PrimaryT::canAllocate(NeededSize);
}
@@ -48,7 +48,7 @@ template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
scudo::uptr Alignment) {
const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
- Size = scudo::roundUpTo(Size, MinAlignment);
+ Size = scudo::roundUp(Size, MinAlignment);
if (Allocator->useMemoryTaggingTestOnly())
EXPECT_DEATH(
{
diff --git a/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp b/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
index 283edaa2a2cc..8a40eda3a571 100644
--- a/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
@@ -163,7 +163,7 @@ TEST_F(MemtagTest, StoreTags) {
uptr TaggedBegin = addFixedTag(NoTagBegin, Tag);
uptr TaggedEnd = addFixedTag(NoTagEnd, Tag);
- EXPECT_EQ(roundUpTo(TaggedEnd, archMemoryTagGranuleSize()),
+ EXPECT_EQ(roundUp(TaggedEnd, archMemoryTagGranuleSize()),
storeTags(TaggedBegin, TaggedEnd));
uptr LoadPtr = Addr;
diff --git a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
index 8625e7fb4b76..146c63823773 100644
--- a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
@@ -29,7 +29,7 @@ TEST(ScudoReleaseTest, RegionPageMap) {
// Verify the packing ratio, the counter is Expected to be packed into the
// closest power of 2 bits.
scudo::RegionPageMap PageMap(1U, SCUDO_WORDSIZE, 1UL << I);
- EXPECT_EQ(sizeof(scudo::uptr) * scudo::roundUpToPowerOfTwo(I + 1),
+ EXPECT_EQ(sizeof(scudo::uptr) * scudo::roundUpPowerOfTwo(I + 1),
PageMap.getBufferSize());
}
@@ -238,7 +238,7 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
InFreeRange = false;
// Verify that all entire memory pages covered by this range of free
// chunks were released.
- scudo::uptr P = scudo::roundUpTo(CurrentFreeRangeStart, PageSize);
+ scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
while (P + PageSize <= CurrentBlock) {
const bool PageReleased =
Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
@@ -254,9 +254,9 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
}
if (InFreeRange) {
- scudo::uptr P = scudo::roundUpTo(CurrentFreeRangeStart, PageSize);
+ scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
const scudo::uptr EndPage =
- scudo::roundUpTo(MaxBlocks * BlockSize, PageSize);
+ scudo::roundUp(MaxBlocks * BlockSize, PageSize);
while (P + PageSize <= EndPage) {
const bool PageReleased =
Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
diff --git a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
index e656466d68f6..b0319011771a 100644
--- a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -64,7 +64,7 @@ template <typename Config> static void testSecondaryBasic(void) {
P = L->allocate(Options, Size + Align, Align);
EXPECT_NE(P, nullptr);
void *AlignedP = reinterpret_cast<void *>(
- scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
+ scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
memset(AlignedP, 'A', Size);
L->deallocate(Options, P);
@@ -122,7 +122,7 @@ struct MapAllocatorTest : public Test {
// combined allocator.
TEST_F(MapAllocatorTest, SecondaryCombinations) {
constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
- constexpr scudo::uptr HeaderSize = scudo::roundUpTo(8, MinAlign);
+ constexpr scudo::uptr HeaderSize = scudo::roundUp(8, MinAlign);
for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
for (scudo::uptr AlignLog = FIRST_32_SECOND_64(3, 4); AlignLog <= 16;
AlignLog++) {
@@ -131,13 +131,13 @@ TEST_F(MapAllocatorTest, SecondaryCombinations) {
if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
continue;
const scudo::uptr UserSize =
- scudo::roundUpTo((1U << SizeLog) + Delta, MinAlign);
+ scudo::roundUp((1U << SizeLog) + Delta, MinAlign);
const scudo::uptr Size =
HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
void *P = Allocator->allocate(Options, Size, Align);
EXPECT_NE(P, nullptr);
void *AlignedP = reinterpret_cast<void *>(
- scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
+ scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
memset(AlignedP, 0xff, UserSize);
Allocator->deallocate(Options, P);
}
diff --git a/compiler-rt/lib/scudo/standalone/trusty.cpp b/compiler-rt/lib/scudo/standalone/trusty.cpp
index 702d4a9a6b63..592514d4c3a6 100644
--- a/compiler-rt/lib/scudo/standalone/trusty.cpp
+++ b/compiler-rt/lib/scudo/standalone/trusty.cpp
@@ -37,7 +37,7 @@ void *map(UNUSED void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
uptr Start;
uptr End;
- Start = roundUpTo(ProgramBreak, SBRK_ALIGN);
+ Start = roundUp(ProgramBreak, SBRK_ALIGN);
// Don't actually extend the heap if MAP_NOACCESS flag is set since this is
// the case where Scudo tries to reserve a memory region without mapping
// physical pages.
@@ -45,7 +45,7 @@ void *map(UNUSED void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
return reinterpret_cast<void *>(Start);
// Attempt to extend the heap by Size bytes using _trusty_brk.
- End = roundUpTo(Start + Size, SBRK_ALIGN);
+ End = roundUp(Start + Size, SBRK_ALIGN);
ProgramBreak =
reinterpret_cast<uptr>(_trusty_brk(reinterpret_cast<void *>(End)));
if (ProgramBreak < End) {
diff --git a/compiler-rt/lib/scudo/standalone/vector.h b/compiler-rt/lib/scudo/standalone/vector.h
index d43205a7111d..9f2c200958fe 100644
--- a/compiler-rt/lib/scudo/standalone/vector.h
+++ b/compiler-rt/lib/scudo/standalone/vector.h
@@ -40,7 +40,7 @@ template <typename T> class VectorNoCtor {
void push_back(const T &Element) {
DCHECK_LE(Size, capacity());
if (Size == capacity()) {
- const uptr NewCapacity = roundUpToPowerOfTwo(Size + 1);
+ const uptr NewCapacity = roundUpPowerOfTwo(Size + 1);
reallocate(NewCapacity);
}
memcpy(&Data[Size++], &Element, sizeof(T));
@@ -82,7 +82,7 @@ template <typename T> class VectorNoCtor {
void reallocate(uptr NewCapacity) {
DCHECK_GT(NewCapacity, 0);
DCHECK_LE(Size, NewCapacity);
- NewCapacity = roundUpTo(NewCapacity * sizeof(T), getPageSizeCached());
+ NewCapacity = roundUp(NewCapacity * sizeof(T), getPageSizeCached());
T *NewData = reinterpret_cast<T *>(
map(nullptr, NewCapacity, "scudo:vector", 0, &MapData));
memcpy(NewData, Data, Size * sizeof(T));
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
index 6c4f10d2d898..37e336ee09d6 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -91,7 +91,7 @@ INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
alignment = 1U;
} else {
if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
- alignment = scudo::roundUpToPowerOfTwo(alignment);
+ alignment = scudo::roundUpPowerOfTwo(alignment);
}
} else {
if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
@@ -131,9 +131,9 @@ INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
scudo::reportPvallocOverflow(size);
}
// pvalloc(0) should allocate one page.
- return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
- size ? scudo::roundUpTo(size, PageSize) : PageSize,
- scudo::Chunk::Origin::Memalign, PageSize));
+ return scudo::setErrnoOnNull(
+ SCUDO_ALLOCATOR.allocate(size ? scudo::roundUp(size, PageSize) : PageSize,
+ scudo::Chunk::Origin::Memalign, PageSize));
}
INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h b/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
index 815d40023b6a..9cd48e82792e 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
@@ -64,7 +64,7 @@ inline bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
// Returns true if the size passed to pvalloc overflows when rounded to the next
// multiple of PageSize.
inline bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
- return roundUpTo(Size, PageSize) < Size;
+ return roundUp(Size, PageSize) < Size;
}
} // namespace scudo
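An aside for anyone applying the rename out of tree: the common.h hunk above
cuts off roundUpPowerOfTwo after its first few lines, as diff context does.
Below is a sketch of the complete renamed helper, reconstructed from the
surrounding definitions (assert again standing in for DCHECK, and
sizeof(uptr) * 8 for SCUDO_WORDSIZE; treat it as illustrative, not
authoritative):

inline uptr getMostSignificantSetBitIndex(uptr X) {
  assert(X != 0);
  return sizeof(uptr) * 8 - 1U - static_cast<uptr>(__builtin_clzl(X));
}

inline uptr roundUpPowerOfTwo(uptr Size) {
  assert(Size != 0);
  if (isPowerOfTwo(Size))
    return Size;
  // Otherwise the next power of two is one bit above Size's most
  // significant set bit.
  return 1UL << (getMostSignificantSetBitIndex(Size) + 1);
}

This is the behavior the VectorNoCtor::push_back change relies on: growing
capacity to the next power of two keeps the number of reallocations
logarithmic in the number of elements pushed.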