[compiler-rt] [HWASan] Prevent same tag for adjacent heap objects (PR #69337)

via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 17 07:28:15 PDT 2023


https://github.com/KonradHohentanner created https://github.com/llvm/llvm-project/pull/69337

Previously, the random tag generation could lead to undetected heap under- and overflows due to tag collisions.
This modification to the tag generation ensures that different tags are generated for adjacent heap objects.
For stack objects, additional changes in the instrumentation pass are necessary.


>From 62120e97084b17ea5d45b2b87b8128855e4bda31 Mon Sep 17 00:00:00 2001
From: Konrad Hohentanner <konrad.hohentanner at aisec.fraunhofer.de>
Date: Tue, 17 Oct 2023 13:09:32 +0000
Subject: [PATCH] [HWASan] Prevent same tag for adjacent heap objects

---
 compiler-rt/lib/hwasan/hwasan_allocator.cpp   |  9 +++-
 compiler-rt/lib/hwasan/hwasan_thread.cpp      | 29 +++++++++++++
 compiler-rt/lib/hwasan/hwasan_thread.h        |  2 +
 .../TestCases/adjacent_tag_collisions_heap.c  | 41 +++++++++++++++++++
 4 files changed, 79 insertions(+), 2 deletions(-)
 create mode 100644 compiler-rt/test/hwasan/TestCases/adjacent_tag_collisions_heap.c

diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
index d21ba024a20e12a..2fc9c6978145999 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -237,7 +237,10 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
   if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
       atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
       flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
-    tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
+    tag_t tag = t ? t->GenerateRandomNonCollidingTag((uptr)user_ptr - 1,
+                                                     (uptr)user_ptr + size)
+                  : kFallbackAllocTag;
+
     uptr tag_size = orig_size ? orig_size : 1;
     uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
     user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
@@ -349,7 +352,9 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
       // would make us attempt to read the memory on a UaF.
       // The tag can be zero if tagging is disabled on this thread.
       do {
-        tag = t->GenerateRandomTag(/*num_bits=*/8);
+        tag = t->GenerateRandomNonCollidingTag((uptr)aligned_ptr - 1,
+                                               (uptr)aligned_ptr + orig_size,
+                                               /*num_bits=*/8);
       } while (
           UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
     } else {
diff --git a/compiler-rt/lib/hwasan/hwasan_thread.cpp b/compiler-rt/lib/hwasan/hwasan_thread.cpp
index ce36547580e6e60..f9500ef5f99c863 100644
--- a/compiler-rt/lib/hwasan/hwasan_thread.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_thread.cpp
@@ -156,6 +156,35 @@ tag_t Thread::GenerateRandomTag(uptr num_bits) {
   return tag;
 }
 
+// Generate a (pseudo-)random non-zero tag and prevent collisions with
+// neighboring objects.
+tag_t Thread::GenerateRandomNonCollidingTag(uptr prev_ptr, uptr foll_ptr,
+                                            uptr num_bits) {
+  DCHECK_GT(num_bits, 0);
+  if (tagging_disabled_)
+    return 0;
+  tag_t tag;
+  tag_t previous_tag = *(tag_t *)MemToShadow(prev_ptr);
+  tag_t following_tag = *(tag_t *)MemToShadow(foll_ptr);
+  const uptr tag_mask = (1ULL << num_bits) - 1;
+  do {
+    if (flags()->random_tags) {
+      if (!random_buffer_) {
+        EnsureRandomStateInited();
+        random_buffer_ = random_state_ = xorshift(random_state_);
+      }
+      CHECK(random_buffer_);
+      tag = random_buffer_ & tag_mask;
+      random_buffer_ >>= num_bits;
+    } else {
+      EnsureRandomStateInited();
+      random_state_ += 1;
+      tag = random_state_ & tag_mask;
+    }
+  } while (!tag || tag == previous_tag || tag == following_tag);
+  return tag;
+}
+
 void EnsureMainThreadIDIsCorrect() {
   auto *t = __hwasan::GetCurrentThread();
   if (t && (t->IsMainThread()))
diff --git a/compiler-rt/lib/hwasan/hwasan_thread.h b/compiler-rt/lib/hwasan/hwasan_thread.h
index 9e1b438e48f771b..53fc506b86c981c 100644
--- a/compiler-rt/lib/hwasan/hwasan_thread.h
+++ b/compiler-rt/lib/hwasan/hwasan_thread.h
@@ -58,6 +58,8 @@ class Thread {
   StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; }
 
   tag_t GenerateRandomTag(uptr num_bits = kTagBits);
+  tag_t GenerateRandomNonCollidingTag(uptr prev_ptr, uptr foll_ptr,
+                                      uptr num_bits = kTagBits);
 
   void DisableTagging() { tagging_disabled_++; }
   void EnableTagging() { tagging_disabled_--; }
diff --git a/compiler-rt/test/hwasan/TestCases/adjacent_tag_collisions_heap.c b/compiler-rt/test/hwasan/TestCases/adjacent_tag_collisions_heap.c
new file mode 100644
index 000000000000000..6381c8fd1125f07
--- /dev/null
+++ b/compiler-rt/test/hwasan/TestCases/adjacent_tag_collisions_heap.c
@@ -0,0 +1,41 @@
+// Test that adjacent heap objects are always tagged differently to prevent unexpected under- and overflows.
+// RUN: %clang_hwasan %s -o %t
+// RUN: %env_hwasan_opts=random_tags=1,disable_allocator_tagging=0 %run %t
+
+#include <assert.h>
+#include <sanitizer/allocator_interface.h>
+#include <sanitizer/hwasan_interface.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static const size_t sizes[] = {16, 32, 64, 128, 256, 512, 1024, 2048};
+
+void check_collisions_on_heap(size_t size) {
+  // Allocate 3 heap objects, which should be placed next to each other
+  void *a = malloc(size);
+  void *b = malloc(size);
+  void *c = malloc(size);
+
+  // Confirm that no object can access adjacent objects
+  assert(__hwasan_test_shadow(a, size + 1) != -1);
+  assert(__hwasan_test_shadow(b, size + 1) != -1);
+  assert(__hwasan_test_shadow(c, size + 1) != -1);
+
+  // Confirm that freeing an object does not increase bounds of objects
+  free(b);
+  assert(__hwasan_test_shadow(a, size + 1) != -1);
+  assert(__hwasan_test_shadow(b, size + 1) != -1);
+  assert(__hwasan_test_shadow(c, size + 1) != -1);
+
+  free(a);
+  free(c);
+}
+
+int main() {
+  for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
+    for (unsigned j = 0; j < 1000; j++) {
+      check_collisions_on_heap(sizes[i]);
+    }
+  }
+  return 0;
+}



More information about the llvm-commits mailing list