[compiler-rt] [HWASan] Prevent same tag for adjacent heap objects (PR #69337)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 20 07:51:38 PDT 2023
https://github.com/KonradHohentanner updated https://github.com/llvm/llvm-project/pull/69337
From 62120e97084b17ea5d45b2b87b8128855e4bda31 Mon Sep 17 00:00:00 2001
From: Konrad Hohentanner <konrad.hohentanner at aisec.fraunhofer.de>
Date: Tue, 17 Oct 2023 13:09:32 +0000
Subject: [PATCH 1/3] [HWASan] Prevent same tag for adjacent heap objects
---
compiler-rt/lib/hwasan/hwasan_allocator.cpp | 9 +++-
compiler-rt/lib/hwasan/hwasan_thread.cpp | 29 +++++++++++++
compiler-rt/lib/hwasan/hwasan_thread.h | 2 +
.../TestCases/adjacent_tag_collisions_heap.c | 41 +++++++++++++++++++
4 files changed, 79 insertions(+), 2 deletions(-)
create mode 100644 compiler-rt/test/hwasan/TestCases/adjacent_tag_collisions_heap.c
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
index d21ba024a20e12a..2fc9c6978145999 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -237,7 +237,10 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
- tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
+ tag_t tag = t ? t->GenerateRandomNonCollidingTag((uptr)user_ptr - 1,
+ (uptr)user_ptr + size)
+ : kFallbackAllocTag;
+
uptr tag_size = orig_size ? orig_size : 1;
uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
@@ -349,7 +352,9 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
// would make us attempt to read the memory on a UaF.
// The tag can be zero if tagging is disabled on this thread.
do {
- tag = t->GenerateRandomTag(/*num_bits=*/8);
+ tag = t->GenerateRandomNonCollidingTag((uptr)aligned_ptr - 1,
+ (uptr)aligned_ptr + orig_size,
+ /*num_bits=*/8);
} while (
UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
} else {
diff --git a/compiler-rt/lib/hwasan/hwasan_thread.cpp b/compiler-rt/lib/hwasan/hwasan_thread.cpp
index ce36547580e6e60..f9500ef5f99c863 100644
--- a/compiler-rt/lib/hwasan/hwasan_thread.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_thread.cpp
@@ -156,6 +156,35 @@ tag_t Thread::GenerateRandomTag(uptr num_bits) {
return tag;
}
+// Generate a (pseudo-)random non-zero tag and prevent collisions with
+// neighboring objects.
+tag_t Thread::GenerateRandomNonCollidingTag(uptr prev_ptr, uptr foll_ptr,
+ uptr num_bits) {
+ DCHECK_GT(num_bits, 0);
+ if (tagging_disabled_)
+ return 0;
+ tag_t tag;
+ tag_t previous_tag = *(tag_t *)MemToShadow(prev_ptr);
+ tag_t following_tag = *(tag_t *)MemToShadow(foll_ptr);
+ const uptr tag_mask = (1ULL << num_bits) - 1;
+ do {
+ if (flags()->random_tags) {
+ if (!random_buffer_) {
+ EnsureRandomStateInited();
+ random_buffer_ = random_state_ = xorshift(random_state_);
+ }
+ CHECK(random_buffer_);
+ tag = random_buffer_ & tag_mask;
+ random_buffer_ >>= num_bits;
+ } else {
+ EnsureRandomStateInited();
+ random_state_ += 1;
+ tag = random_state_ & tag_mask;
+ }
+ } while (!tag || tag == previous_tag || tag == following_tag);
+ return tag;
+}
+
void EnsureMainThreadIDIsCorrect() {
auto *t = __hwasan::GetCurrentThread();
if (t && (t->IsMainThread()))
diff --git a/compiler-rt/lib/hwasan/hwasan_thread.h b/compiler-rt/lib/hwasan/hwasan_thread.h
index 9e1b438e48f771b..53fc506b86c981c 100644
--- a/compiler-rt/lib/hwasan/hwasan_thread.h
+++ b/compiler-rt/lib/hwasan/hwasan_thread.h
@@ -58,6 +58,8 @@ class Thread {
StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; }
tag_t GenerateRandomTag(uptr num_bits = kTagBits);
+ tag_t GenerateRandomNonCollidingTag(uptr prev_ptr, uptr foll_ptr,
+ uptr num_bits = kTagBits);
void DisableTagging() { tagging_disabled_++; }
void EnableTagging() { tagging_disabled_--; }
diff --git a/compiler-rt/test/hwasan/TestCases/adjacent_tag_collisions_heap.c b/compiler-rt/test/hwasan/TestCases/adjacent_tag_collisions_heap.c
new file mode 100644
index 000000000000000..6381c8fd1125f07
--- /dev/null
+++ b/compiler-rt/test/hwasan/TestCases/adjacent_tag_collisions_heap.c
@@ -0,0 +1,41 @@
+// Test that adjacent heap objects are always tagged differently to prevent unexpected under- and overflows.
+// RUN: %clang_hwasan %s -o %t
+// RUN: %env_hwasan_opts=random_tags=1,disable_allocator_tagging=0 %run %t
+
+#include <assert.h>
+#include <sanitizer/allocator_interface.h>
+#include <sanitizer/hwasan_interface.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static const size_t sizes[] = {16, 32, 64, 128, 256, 512, 1024, 2048};
+
+void check_collisions_on_heap(size_t size) {
+ // Allocate 3 heap objects, which should be placed next to each other
+ void *a = malloc(size);
+ void *b = malloc(size);
+ void *c = malloc(size);
+
+ // Confirm that no object can access adjacent objects
+ assert(__hwasan_test_shadow(a, size + 1) != -1);
+ assert(__hwasan_test_shadow(b, size + 1) != -1);
+ assert(__hwasan_test_shadow(c, size + 1) != -1);
+
+ // Confirm that freeing an object does not increase bounds of objects
+ free(b);
+ assert(__hwasan_test_shadow(a, size + 1) != -1);
+ assert(__hwasan_test_shadow(b, size + 1) != -1);
+ assert(__hwasan_test_shadow(c, size + 1) != -1);
+
+ free(a);
+ free(c);
+}
+
+int main() {
+ for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
+ for (unsigned j = 0; j < 1000; j++) {
+ check_collisions_on_heap(sizes[i]);
+ }
+ }
+ return 0;
+}
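The core of PATCH 1/3 is a rejection-sampling loop: keep redrawing the tag until it is non-zero and differs from the tags already stored in shadow for the granules just before and just after the object. A minimal self-contained sketch of that idea in plain C; the toy shadow array and the rand() draw are stand-ins for MemToShadow and Thread::GenerateRandomTag, which this sketch does not reproduce exactly:

#include <stdint.h>
#include <stdlib.h>

typedef uint8_t tag_t;

/* Toy shadow: one tag byte per 16-byte granule, as in HWASan. */
enum { kGranule = 16, kShadowSize = 1024 };
static tag_t shadow[kShadowSize];

/* Models a MemToShadow load of the granule containing addr. */
static tag_t shadow_tag_at(uintptr_t addr) {
  return shadow[(addr / kGranule) % kShadowSize];
}

/* Redraw until the tag is non-zero and differs from both neighbors.
 * obj_start/obj_end delimit the granule-aligned object, so the bytes
 * at obj_start - 1 and obj_end fall into the adjacent granules. */
static tag_t non_colliding_tag(uintptr_t obj_start, uintptr_t obj_end) {
  tag_t prev = shadow_tag_at(obj_start - 1);
  tag_t next = shadow_tag_at(obj_end);
  tag_t tag;
  do {
    tag = (tag_t)(rand() & 0xff); /* stands in for GenerateRandomTag() */
  } while (tag == 0 || tag == prev || tag == next);
  return tag;
}

int main(void) {
  shadow[1] = 0x2a;                    /* left neighbor's tag */
  shadow[4] = 0x2a;                    /* right neighbor's tag */
  tag_t t = non_colliding_tag(32, 64); /* object covers granules 2-3 */
  return t == 0 || t == 0x2a;          /* never collides: exits 0 */
}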
From 3dd915fb1d6ccf807583e9b67ff29b7247b76ba3 Mon Sep 17 00:00:00 2001
From: Konrad Hohentanner <konrad.hohentanner at aisec.fraunhofer.de>
Date: Tue, 17 Oct 2023 14:38:03 +0000
Subject: [PATCH 2/3] Use TaggedSize to calculate adjacent memory address
---
compiler-rt/lib/hwasan/hwasan_allocator.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
index 2fc9c6978145999..f560bcd09baa3ec 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -352,9 +352,9 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
// would make us attempt to read the memory on a UaF.
// The tag can be zero if tagging is disabled on this thread.
do {
- tag = t->GenerateRandomNonCollidingTag((uptr)aligned_ptr - 1,
- (uptr)aligned_ptr + orig_size,
- /*num_bits=*/8);
+ tag = t->GenerateRandomNonCollidingTag(
+ (uptr)aligned_ptr - 1, (uptr)aligned_ptr + TaggedSize(orig_size),
+ /*num_bits=*/8);
} while (
UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
} else {
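PATCH 2/3 matters because the shadow tracks 16-byte granules (kShadowAlignment): the following object begins at the allocation rounded up to a granule boundary, not at aligned_ptr + orig_size. A worked example of the address computation, with a local tagged_size() that is assumed to round up to a whole number of granules the way the allocator's TaggedSize does:

#include <assert.h>
#include <stdint.h>

enum { kShadowAlignment = 16 }; /* HWASan granule size */

/* Local stand-in for the allocator's TaggedSize(): round the size up
 * to a whole number of granules (assumed behavior). */
static uintptr_t tagged_size(uintptr_t size) {
  if (size == 0)
    size = 1;
  return (size + kShadowAlignment - 1) &
         ~(uintptr_t)(kShadowAlignment - 1);
}

int main(void) {
  uintptr_t aligned_ptr = 0x1000;
  uintptr_t orig_size = 20; /* occupies two granules, bytes 0..31 */

  /* PATCH 1/3 probed aligned_ptr + orig_size = 0x1014, which is still
   * inside the object's own second granule, so the "following" tag it
   * read was the object's own tag. */
  assert(aligned_ptr + orig_size == 0x1014);

  /* PATCH 2/3 probes aligned_ptr + tagged_size(orig_size) = 0x1020,
   * the first byte of the next allocation. */
  assert(aligned_ptr + tagged_size(orig_size) == 0x1020);
  return 0;
}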
From 209134aa17a4d5a17cb1d85fa0442afe57529683 Mon Sep 17 00:00:00 2001
From: Konrad Hohentanner <konrad.hohentanner at aisec.fraunhofer.de>
Date: Thu, 19 Oct 2023 13:51:50 +0000
Subject: [PATCH 3/3] Code Review adjustments
---
compiler-rt/lib/hwasan/hwasan.cpp | 6 +-
compiler-rt/lib/hwasan/hwasan_allocator.cpp | 33 +++++++----
compiler-rt/lib/hwasan/hwasan_thread.cpp | 55 ++++---------------
compiler-rt/lib/hwasan/hwasan_thread.h | 2 -
.../TestCases/adjacent_tag_collisions_heap.c | 24 +++++---
5 files changed, 56 insertions(+), 64 deletions(-)
diff --git a/compiler-rt/lib/hwasan/hwasan.cpp b/compiler-rt/lib/hwasan/hwasan.cpp
index 2f6cb10caf1be60..8c0a730744675b0 100644
--- a/compiler-rt/lib/hwasan/hwasan.cpp
+++ b/compiler-rt/lib/hwasan/hwasan.cpp
@@ -729,7 +729,11 @@ static const u8 kFallbackTag = 0xBB & kTagMask;
u8 __hwasan_generate_tag() {
Thread *t = GetCurrentThread();
if (!t) return kFallbackTag;
- return t->GenerateRandomTag();
+ u8 tag;
+ do {
+ tag = GetCurrentThread()->GenerateRandomTag();
+ } while (tag == 0);
+ return tag;
}
void __hwasan_add_frame_record(u64 frame_record_info) {
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
index f560bcd09baa3ec..39cb82c8b499618 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -152,8 +152,11 @@ void HwasanAllocatorInit() {
allocator.InitLinkerInitialized(
common_flags()->allocator_release_to_os_interval_ms,
GetAliasRegionStart());
- for (uptr i = 0; i < sizeof(tail_magic); i++)
- tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
+ for (uptr i = 0; i < sizeof(tail_magic); i++) {
+ do {
+ tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
+ } while (tail_magic[i] == 0);
+ }
if (common_flags()->max_allocation_size_mb) {
max_malloc_size = common_flags()->max_allocation_size_mb << 20;
max_malloc_size = Min(max_malloc_size, kMaxAllowedMallocSize);
@@ -237,9 +240,17 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
- tag_t tag = t ? t->GenerateRandomNonCollidingTag((uptr)user_ptr - 1,
- (uptr)user_ptr + size)
- : kFallbackAllocTag;
+ tag_t tag;
+ if (t) {
+ tag_t previous_tag = *(tag_t *)(MemToShadow((uptr)(user_ptr)-1));
+ tag_t following_tag = *(tag_t *)(MemToShadow((uptr)(user_ptr) + size));
+ do {
+ tag = t->GenerateRandomTag();
+ } while (
+ UNLIKELY(tag == previous_tag || tag == following_tag || tag == 0));
+ } else {
+ tag = kFallbackAllocTag;
+ }
uptr tag_size = orig_size ? orig_size : 1;
uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
@@ -348,15 +359,17 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
// Always store full 8-bit tags on free to maximize UAF detection.
tag_t tag;
if (t) {
+ tag_t previous_tag = *(tag_t *)(MemToShadow((uptr)(aligned_ptr)-1));
+ tag_t following_tag =
+ *(tag_t *)(MemToShadow((uptr)(aligned_ptr) + TaggedSize(orig_size)));
// Make sure we are not using a short granule tag as a poison tag. This
// would make us attempt to read the memory on a UaF.
// The tag can be zero if tagging is disabled on this thread.
do {
- tag = t->GenerateRandomNonCollidingTag(
- (uptr)aligned_ptr - 1, (uptr)aligned_ptr + TaggedSize(orig_size),
- /*num_bits=*/8);
- } while (
- UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
+ tag = t->GenerateRandomTag(/*num_bits=*/8);
+ } while (UNLIKELY(tag < kShadowAlignment || tag == pointer_tag ||
+ tag == previous_tag || tag == following_tag) &&
+ tag != 0);
} else {
static_assert(kFallbackFreeTag >= kShadowAlignment,
"fallback tag must not be a short granule tag.");
diff --git a/compiler-rt/lib/hwasan/hwasan_thread.cpp b/compiler-rt/lib/hwasan/hwasan_thread.cpp
index f9500ef5f99c863..a21f2e7e8fd601e 100644
--- a/compiler-rt/lib/hwasan/hwasan_thread.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_thread.cpp
@@ -131,57 +131,26 @@ static u32 xorshift(u32 state) {
return state;
}
-// Generate a (pseudo-)random non-zero tag.
+// Generate a (pseudo-)random tag.
tag_t Thread::GenerateRandomTag(uptr num_bits) {
DCHECK_GT(num_bits, 0);
if (tagging_disabled_)
return 0;
tag_t tag;
const uptr tag_mask = (1ULL << num_bits) - 1;
- do {
- if (flags()->random_tags) {
- if (!random_buffer_) {
- EnsureRandomStateInited();
- random_buffer_ = random_state_ = xorshift(random_state_);
- }
- CHECK(random_buffer_);
- tag = random_buffer_ & tag_mask;
- random_buffer_ >>= num_bits;
- } else {
+ if (flags()->random_tags) {
+ if (!random_buffer_) {
EnsureRandomStateInited();
- random_state_ += 1;
- tag = random_state_ & tag_mask;
+ random_buffer_ = random_state_ = xorshift(random_state_);
}
- } while (!tag);
- return tag;
-}
-
-// Generate a (pseudo-)random non-zero tag and prevent collisions with
-// neighboring objects.
-tag_t Thread::GenerateRandomNonCollidingTag(uptr prev_ptr, uptr foll_ptr,
- uptr num_bits) {
- DCHECK_GT(num_bits, 0);
- if (tagging_disabled_)
- return 0;
- tag_t tag;
- tag_t previous_tag = *(tag_t *)MemToShadow(prev_ptr);
- tag_t following_tag = *(tag_t *)MemToShadow(foll_ptr);
- const uptr tag_mask = (1ULL << num_bits) - 1;
- do {
- if (flags()->random_tags) {
- if (!random_buffer_) {
- EnsureRandomStateInited();
- random_buffer_ = random_state_ = xorshift(random_state_);
- }
- CHECK(random_buffer_);
- tag = random_buffer_ & tag_mask;
- random_buffer_ >>= num_bits;
- } else {
- EnsureRandomStateInited();
- random_state_ += 1;
- tag = random_state_ & tag_mask;
- }
- } while (!tag || tag == previous_tag || tag == following_tag);
+ CHECK(random_buffer_);
+ tag = random_buffer_ & tag_mask;
+ random_buffer_ >>= num_bits;
+ } else {
+ EnsureRandomStateInited();
+ random_state_ += 1;
+ tag = random_state_ & tag_mask;
+ }
return tag;
}
diff --git a/compiler-rt/lib/hwasan/hwasan_thread.h b/compiler-rt/lib/hwasan/hwasan_thread.h
index 53fc506b86c981c..9e1b438e48f771b 100644
--- a/compiler-rt/lib/hwasan/hwasan_thread.h
+++ b/compiler-rt/lib/hwasan/hwasan_thread.h
@@ -58,8 +58,6 @@ class Thread {
StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; }
tag_t GenerateRandomTag(uptr num_bits = kTagBits);
- tag_t GenerateRandomNonCollidingTag(uptr prev_ptr, uptr foll_ptr,
- uptr num_bits = kTagBits);
void DisableTagging() { tagging_disabled_++; }
void EnableTagging() { tagging_disabled_--; }
diff --git a/compiler-rt/test/hwasan/TestCases/adjacent_tag_collisions_heap.c b/compiler-rt/test/hwasan/TestCases/adjacent_tag_collisions_heap.c
index 6381c8fd1125f07..f11693d28bc4b79 100644
--- a/compiler-rt/test/hwasan/TestCases/adjacent_tag_collisions_heap.c
+++ b/compiler-rt/test/hwasan/TestCases/adjacent_tag_collisions_heap.c
@@ -16,16 +16,24 @@ void check_collisions_on_heap(size_t size) {
void *b = malloc(size);
void *c = malloc(size);
- // Confirm that no object can access adjacent objects
- assert(__hwasan_test_shadow(a, size + 1) != -1);
- assert(__hwasan_test_shadow(b, size + 1) != -1);
- assert(__hwasan_test_shadow(c, size + 1) != -1);
+ // Confirm that no pointer can access right adjacent objects
+ assert(__hwasan_test_shadow(a, size + 1) == size);
+ assert(__hwasan_test_shadow(b, size + 1) == size);
+ assert(__hwasan_test_shadow(c, size + 1) == size);
- // Confirm that freeing an object does not increase bounds of objects
+ // Confirm that no pointer can access left adjacent objects
+ assert(__hwasan_test_shadow(a - 1, 1) == 0);
+ assert(__hwasan_test_shadow(b - 1, 1) == 0);
+ assert(__hwasan_test_shadow(c - 1, 1) == 0);
+
+ // Confirm that freeing an object does not increase bounds of adjacent objects and sets accessible bounds of freed pointer to zero
free(b);
- assert(__hwasan_test_shadow(a, size + 1) != -1);
- assert(__hwasan_test_shadow(b, size + 1) != -1);
- assert(__hwasan_test_shadow(c, size + 1) != -1);
+ assert(__hwasan_test_shadow(a, size + 1) == size);
+ assert(__hwasan_test_shadow(b, size + 1) == 0);
+ assert(__hwasan_test_shadow(c, size + 1) == size);
+ assert(__hwasan_test_shadow(a - 1, 1) == 0);
+ assert(__hwasan_test_shadow(b - 1, 1) == 0);
+ assert(__hwasan_test_shadow(c - 1, 1) == 0);
free(a);
free(c);
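The tightened assertions lean on the return convention of __hwasan_test_shadow(p, sz): -1 when every byte in [p, p + sz) is accessible through p, otherwise the offset of the first byte whose granule tag mismatches the pointer's tag. A small usage sketch, to be built with -fsanitize=hwaddress like the lit test; note that without this patch the size + 1 checks would only hold probabilistically, since adjacent tags could still collide:

#include <assert.h>
#include <sanitizer/hwasan_interface.h>
#include <stdint.h>
#include <stdlib.h>

int main(void) {
  size_t size = 32;
  char *p = malloc(size);

  /* Whole object accessible through p: -1. */
  assert(__hwasan_test_shadow(p, size) == -1);

  /* One byte past the end lands in the neighbor's granule; with
   * adjacent tags guaranteed distinct, the first failing offset is
   * exactly `size`, which is what the updated test asserts. */
  assert(__hwasan_test_shadow(p, size + 1) == (intptr_t)size);

  /* After free the memory is retagged, so the mismatch is at 0. */
  free(p);
  assert(__hwasan_test_shadow(p, size + 1) == 0);
  return 0;
}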