[compiler-rt] b3ccfa1 - [hwasan] Increase max allocation size to 1Tb.
Author: Evgenii Stepanov
Date: 2020-10-20T14:01:48-07:00
New Revision: b3ccfa1e0ce74041a76ce802e3be7944eea57c41
URL: https://github.com/llvm/llvm-project/commit/b3ccfa1e0ce74041a76ce802e3be7944eea57c41
DIFF: https://github.com/llvm/llvm-project/commit/b3ccfa1e0ce74041a76ce802e3be7944eea57c41.diff
LOG: [hwasan] Increase max allocation size to 1Tb.
2 GB is unreasonably low on devices with 12 GB of RAM or more.
Differential Revision: https://reviews.llvm.org/D89750
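For context: the core of the change is the Metadata layout in hwasan_allocator.h. The old 31-bit requested_size bitfield could only represent sizes below 2 GB ("sizes are < 2G" in the removed comment), which is what tied kMaxAllowedMallocSize to 2 GB. The new layout stores the size as a full 32-bit low word plus a 31-bit high word behind get_requested_size()/set_requested_size(), leaving the remaining bit for right_aligned, so the new 1 TB cap is representable with room to spare. A minimal standalone sketch of that encoding (field and accessor names mirror the patch; the main() driver is illustrative only):

#include <cassert>
#include <cstdint>

// Sketch of the new metadata encoding: a 64-bit requested size split across
// a 32-bit low word and a 31-bit high word, with one bit left for the
// right_aligned flag.
struct MetadataSketch {
  uint32_t requested_size_low;
  uint32_t requested_size_high : 31;
  uint32_t right_aligned : 1;
  uint32_t alloc_context_id;

  uint64_t get_requested_size() const {
    return (static_cast<uint64_t>(requested_size_high) << 32) +
           requested_size_low;
  }
  void set_requested_size(uint64_t size) {
    requested_size_low = size & ((1ull << 32) - 1);
    requested_size_high = size >> 32;
  }
};

int main() {
  MetadataSketch m{};
  const uint64_t one_tb = 1ull << 40;  // the new kMaxAllowedMallocSize
  m.set_requested_size(one_tb);
  assert(m.get_requested_size() == one_tb);
  // A value the old 31-bit field could not hold:
  m.set_requested_size((2ull << 30) + 1);
  assert(m.get_requested_size() == (2ull << 30) + 1);
  return 0;
}

The remaining allocator changes are mechanical: every read and write of meta->requested_size is routed through the new accessors.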
Added:
Modified:
compiler-rt/lib/hwasan/hwasan_allocator.cpp
compiler-rt/lib/hwasan/hwasan_allocator.h
compiler-rt/test/hwasan/TestCases/allocator_returns_null.cpp
compiler-rt/test/hwasan/TestCases/sizes.cpp
Removed:
################################################################################
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
index 1d82db0e3944..0b6b7347892e 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -42,7 +42,8 @@ enum RightAlignMode {
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
bool HwasanChunkView::IsAllocated() const {
- return metadata_ && metadata_->alloc_context_id && metadata_->requested_size;
+ return metadata_ && metadata_->alloc_context_id &&
+ metadata_->get_requested_size();
}
// Aligns the 'addr' right to the granule boundary.
@@ -54,14 +55,14 @@ static uptr AlignRight(uptr addr, uptr requested_size) {
uptr HwasanChunkView::Beg() const {
if (metadata_ && metadata_->right_aligned)
- return AlignRight(block_, metadata_->requested_size);
+ return AlignRight(block_, metadata_->get_requested_size());
return block_;
}
uptr HwasanChunkView::End() const {
return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
- return metadata_->requested_size;
+ return metadata_->get_requested_size();
}
u32 HwasanChunkView::GetAllocStackId() const {
return metadata_->alloc_context_id;
@@ -129,7 +130,7 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
}
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
- meta->requested_size = static_cast<u32>(orig_size);
+ meta->set_requested_size(orig_size);
meta->alloc_context_id = StackDepotPut(*stack);
meta->right_aligned = false;
if (zeroise) {
@@ -191,7 +192,7 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
- uptr orig_size = meta->requested_size;
+ uptr orig_size = meta->get_requested_size();
u32 free_context_id = StackDepotPut(*stack);
u32 alloc_context_id = meta->alloc_context_id;
@@ -208,7 +209,7 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
orig_size, tail_magic);
}
- meta->requested_size = 0;
+ meta->set_requested_size(0);
meta->alloc_context_id = 0;
// This memory will not be reused by anyone else, so we are free to keep it
// poisoned.
@@ -245,8 +246,9 @@ static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
- internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old,
- Min(new_size, static_cast<uptr>(meta->requested_size)));
+ internal_memcpy(
+ UntagPtr(tagged_ptr_new), untagged_ptr_old,
+ Min(new_size, static_cast<uptr>(meta->get_requested_size())));
HwasanDeallocate(stack, tagged_ptr_old);
}
return tagged_ptr_new;
@@ -282,7 +284,7 @@ static uptr AllocationSize(const void *tagged_ptr) {
} else {
if (beg != untagged_ptr) return 0;
}
- return b->requested_size;
+ return b->get_requested_size();
}
void *hwasan_malloc(uptr size, StackTrace *stack) {
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.h b/compiler-rt/lib/hwasan/hwasan_allocator.h
index f62be2696021..43670a6a3fb7 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocator.h
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.h
@@ -28,9 +28,17 @@
namespace __hwasan {
struct Metadata {
- u32 requested_size : 31; // sizes are < 2G.
- u32 right_aligned : 1;
+ u32 requested_size_low;
+ u32 requested_size_high : 31;
+ u32 right_aligned : 1;
u32 alloc_context_id;
+ u64 get_requested_size() {
+ return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
+ }
+ void set_requested_size(u64 size) {
+ requested_size_low = size & ((1ul << 32) - 1);
+ requested_size_high = size >> 32;
+ }
};
struct HwasanMapUnmapCallback {
@@ -43,7 +51,7 @@ struct HwasanMapUnmapCallback {
}
};
-static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
+static const uptr kMaxAllowedMallocSize = 1UL << 40; // 1T
struct AP64 {
static const uptr kSpaceBeg = ~0ULL;
diff --git a/compiler-rt/test/hwasan/TestCases/allocator_returns_null.cpp b/compiler-rt/test/hwasan/TestCases/allocator_returns_null.cpp
index a924d9586d28..11a9615f6f50 100644
--- a/compiler-rt/test/hwasan/TestCases/allocator_returns_null.cpp
+++ b/compiler-rt/test/hwasan/TestCases/allocator_returns_null.cpp
@@ -55,7 +55,7 @@ int main(int argc, char **argv) {
const char *action = argv[1];
untag_fprintf(stderr, "%s:\n", action);
- static const size_t kMaxAllowedMallocSizePlusOne = (2UL << 30) + 1;
+ static const size_t kMaxAllowedMallocSizePlusOne = (1UL << 40) + 1;
void *x = nullptr;
if (!untag_strcmp(action, "malloc")) {
diff --git a/compiler-rt/test/hwasan/TestCases/sizes.cpp b/compiler-rt/test/hwasan/TestCases/sizes.cpp
index 5676534bae10..f397256e52e4 100644
--- a/compiler-rt/test/hwasan/TestCases/sizes.cpp
+++ b/compiler-rt/test/hwasan/TestCases/sizes.cpp
@@ -44,7 +44,7 @@ int main(int argc, char **argv) {
static const size_t kChunkHeaderSize = 16;
size_t MallocSize = test_size_max ? std::numeric_limits<size_t>::max()
- : kMaxAllowedMallocSize;
+ : (kMaxAllowedMallocSize + 1);
if (!untag_strcmp(argv[1], "malloc")) {
void *p = malloc(MallocSize);
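The test updates follow directly from the new limit: allocator_returns_null.cpp now requests (1UL << 40) + 1 bytes, one byte past the new cap, and sizes.cpp requests kMaxAllowedMallocSize + 1 instead of the cap itself, so both still exercise the oversize path at the new boundary. A rough standalone sketch of that boundary probe (assumes a 64-bit target and that HWASan runs with the common sanitizer flag allocator_may_return_null=1 so the oversize request fails instead of aborting; this is not the actual test harness):

#include <cstdio>
#include <cstdlib>

int main() {
  // One byte past the new 1 TB cap (kMaxAllowedMallocSize = 1UL << 40).
  const size_t kMaxAllowedMallocSizePlusOne = (1UL << 40) + 1;
  void *p = malloc(kMaxAllowedMallocSizePlusOne);
  if (!p)
    fprintf(stderr, "malloc(%zu) returned null as expected\n",
            kMaxAllowedMallocSizePlusOne);
  free(p);
  return 0;
}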