[compiler-rt] 0aafeb8 - Reland [TSan] Clarify and enforce shadow end alignment (#146676)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 2 05:28:34 PDT 2025
Author: Kunqiu Chen
Date: 2025-07-02T20:28:30+08:00
New Revision: 0aafeb8ba1b1adcf04d5d82553228ca82845fcb0
URL: https://github.com/llvm/llvm-project/commit/0aafeb8ba1b1adcf04d5d82553228ca82845fcb0
DIFF: https://github.com/llvm/llvm-project/commit/0aafeb8ba1b1adcf04d5d82553228ca82845fcb0.diff
LOG: Reland [TSan] Clarify and enforce shadow end alignment (#146676)
#144648 was reverted because it failed the new sanitizer test
`munmap_clear_shadow.c` in iOS CI.
That failure is fixed by marking the test as unsupported on the platforms
where it cannot work reliably.
Specifically, the test is disabled on FreeBSD, Apple, NetBSD, Solaris,
and Haiku, where `ReleaseMemoryPagesToOS` executes
`madvise(beg, end, MADV_FREE)`, which only tags the relevant pages as
'FREE' and does not release them immediately.
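For reference, here is a minimal standalone sketch (not part of this commit) of the MADV_FREE behavior that makes the test unreliable on those platforms; the MADV_FREE/MADV_DONTNEED fallback below is an assumption about the host, not code taken from the sanitizer runtime:

#include <cassert>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  void *p = mmap(nullptr, page, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(p != MAP_FAILED);
  std::memset(p, 0xAB, page);
#ifdef MADV_FREE
  // MADV_FREE only tags the pages as reclaimable; until the kernel actually
  // needs the memory, a read may still observe the old 0xAB contents.
  madvise(p, page, MADV_FREE);
#else
  // MADV_DONTNEED (the fallback where MADV_FREE is unavailable) guarantees
  // that the next access sees zero-filled pages.
  madvise(p, page, MADV_DONTNEED);
#endif
  volatile unsigned char first = static_cast<unsigned char *>(p)[0];
  (void)first; // may be 0xAB or 0 after MADV_FREE; always 0 after MADV_DONTNEED
  munmap(p, page);
  return 0;
}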
Added:
Modified:
compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
compiler-rt/lib/tsan/rtl/tsan_sync.cpp
compiler-rt/test/tsan/java_heap_init2.cpp
compiler-rt/test/tsan/munmap_clear_shadow.c
Removed:
################################################################################
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
index 7c15a16388268..cb4d767d903d3 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
@@ -122,7 +122,6 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) {
DCHECK_GE(dst, jctx->heap_begin);
DCHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
DCHECK_NE(dst, src);
- DCHECK_NE(size, 0);
// Assuming it's not running concurrently with threads that do
// memory accesses and mutex operations (stop-the-world phase).
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index 981f37b89e784..0d7247a56a4c2 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -566,17 +566,32 @@ static bool IsValidMmapRange(uptr addr, uptr size) {
return false;
}
-void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
+void UnmapShadow(ThreadState* thr, uptr addr, uptr size) {
if (size == 0 || !IsValidMmapRange(addr, size))
return;
- DontNeedShadowFor(addr, size);
+ // UnmapShadow is tied to the semantics of mmap/munmap, so we
+ // should clear the whole shadow range, including the tail shadow
+ // cell when (addr + size) % kShadowCell != 0.
+ uptr rounded_size_shadow = RoundUp(addr + size, kShadowCell) - addr;
+ DontNeedShadowFor(addr, rounded_size_shadow);
ScopedGlobalProcessor sgp;
SlotLocker locker(thr, true);
- ctx->metamap.ResetRange(thr->proc(), addr, size, true);
+ uptr rounded_size_meta = RoundUp(addr + size, kMetaShadowCell) - addr;
+ ctx->metamap.ResetRange(thr->proc(), addr, rounded_size_meta, true);
}
#endif
void MapShadow(uptr addr, uptr size) {
+ // Although named MapShadow, this function's semantics are unrelated to
+ // UnmapShadow. It is currently only used for Go's lazy allocation of
+ // shadow, whose targets are program sections (e.g., bss, data, etc.).
+ // Therefore, addr and size are guaranteed to be aligned to kShadowCell
+ // and kMetaShadowCell, which the following assertions enforce.
+ DCHECK_EQ(addr % kShadowCell, 0);
+ DCHECK_EQ(size % kShadowCell, 0);
+ DCHECK_EQ(addr % kMetaShadowCell, 0);
+ DCHECK_EQ(size % kMetaShadowCell, 0);
+
// Ensure the thread registry lock is held, so as to synchronize
// with DoReset, which also accesses the mapped_shadow_* ctxt fields.
ThreadRegistryLock lock0(&ctx->thread_registry);
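To illustrate the new rounding in UnmapShadow above, a minimal sketch (not from the commit) follows; the kShadowCell and kMetaShadowCell values are restated assumptions matching the defaults referenced elsewhere in this patch, not pulled from the real headers:

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;
// Assumed cell sizes (both 8 bytes of application memory per cell).
constexpr uptr kShadowCell = 8;
constexpr uptr kMetaShadowCell = 8;

constexpr uptr RoundUp(uptr x, uptr align) { return (x + align - 1) & ~(align - 1); }

int main() {
  // munmap of a page-aligned range whose end is not cell-aligned.
  uptr addr = 0x1000, size = 0x1003;
  uptr rounded_size_shadow = RoundUp(addr + size, kShadowCell) - addr;
  uptr rounded_size_meta = RoundUp(addr + size, kMetaShadowCell) - addr;
  // The unrounded size (0x1003) would leave the shadow cell covering the last
  // 3 bytes untouched; rounding extends the cleared range to 0x1008.
  std::printf("shadow range: 0x%zx, meta range: 0x%zx\n",
              static_cast<size_t>(rounded_size_shadow),
              static_cast<size_t>(rounded_size_meta));
  return 0;
}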
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
index bd8deefefa1bc..487fa490636eb 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
@@ -688,16 +688,18 @@ void MemoryAccessRangeT(ThreadState* thr, uptr pc, uptr addr, uptr size) {
DCHECK(IsShadowMem(shadow_mem));
}
- RawShadow* shadow_mem_end = reinterpret_cast<RawShadow*>(
- reinterpret_cast<uptr>(shadow_mem) + size * kShadowMultiplier - 1);
- if (!IsShadowMem(shadow_mem_end)) {
- Printf("Bad shadow end addr: %p (%p)\n", shadow_mem_end,
+ uptr rounded_size =
+ (RoundUpTo(addr + size, kShadowCell) - RoundDownTo(addr, kShadowCell));
+ RawShadow* shadow_mem_end =
+ shadow_mem + rounded_size / kShadowCell * kShadowCnt;
+ if (!IsShadowMem(shadow_mem_end - 1)) {
+ Printf("Bad shadow end addr: %p (%p)\n", shadow_mem_end - 1,
(void*)(addr + size - 1));
Printf(
- "Shadow start addr (ok): %p (%p); size: 0x%zx; kShadowMultiplier: "
- "%zx\n",
- shadow_mem, (void*)addr, size, kShadowMultiplier);
- DCHECK(IsShadowMem(shadow_mem_end));
+ "Shadow start addr (ok): %p (%p); size: 0x%zx; rounded_size: 0x%zx; "
+ "kShadowMultiplier: %zx\n",
+ shadow_mem, (void*)addr, size, rounded_size, kShadowMultiplier);
+ DCHECK(IsShadowMem(shadow_mem_end - 1));
}
#endif
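The shadow-end computation in MemoryAccessRangeT can be checked with a small sketch (not from the commit); kShadowCell and kShadowCnt are assumed to be the TSan defaults of 8 and 4:

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;
constexpr uptr kShadowCell = 8; // assumed: app bytes covered by one shadow cell
constexpr uptr kShadowCnt = 4;  // assumed: RawShadow slots per cell

constexpr uptr RoundUpTo(uptr x, uptr a) { return (x + a - 1) & ~(a - 1); }
constexpr uptr RoundDownTo(uptr x, uptr a) { return x & ~(a - 1); }

int main() {
  // An access range that straddles a cell boundary at both ends.
  uptr addr = 0x1004, size = 0x9;
  uptr rounded_size =
      RoundUpTo(addr + size, kShadowCell) - RoundDownTo(addr, kShadowCell);
  // Number of RawShadow slots covered by the range; the last valid slot is
  // the one just before shadow_mem_end, which is what the relanded code now
  // checks with IsShadowMem(shadow_mem_end - 1).
  uptr slots = rounded_size / kShadowCell * kShadowCnt;
  std::printf("rounded_size = 0x%zx, shadow slots = %zu\n",
              static_cast<size_t>(rounded_size), static_cast<size_t>(slots));
  return 0;
}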
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
index 97335bc8ecf71..be5829bc823dc 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
@@ -246,6 +246,20 @@ void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
// there are no concurrent accesses to the regions (e.g. stop-the-world).
CHECK_NE(src, dst);
CHECK_NE(sz, 0);
+
+ // The current MoveMemory implementation behaves incorrectly when src, dst,
+ // and sz are not aligned to kMetaShadowCell.
+ // For example, with kMetaShadowCell == 8:
+ // - src = 4: unexpectedly clears the metadata for the range [0, 4).
+ // - src = 16, dst = 4, sz = 8: a sync variable for addr = 20, which should
+ //   be moved to the metadata for address 8, is incorrectly moved to the
+ //   metadata for address 0 instead.
+ // - src = 0, sz = 4: fails to move the tail metadata.
+ // Therefore, the following assertions are needed.
+ DCHECK_EQ(src % kMetaShadowCell, 0);
+ DCHECK_EQ(dst % kMetaShadowCell, 0);
+ DCHECK_EQ(sz % kMetaShadowCell, 0);
+
uptr diff = dst - src;
u32 *src_meta, *dst_meta, *src_meta_end;
uptr inc;
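The second bullet in the new MoveMemory comment can be reproduced with a small sketch (not from the commit); MetaIdx is a hypothetical helper standing in for the real meta-shadow mapping, and kMetaShadowCell = 8 follows the example in that comment:

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;
constexpr uptr kMetaShadowCell = 8; // value taken from the example above

// Hypothetical helper: index of the meta cell holding metadata for an address.
constexpr uptr MetaIdx(uptr addr) { return addr / kMetaShadowCell; }

int main() {
  // Misaligned move: src = 16, dst = 4, sz = 8, with a sync variable at 20.
  uptr src = 16, dst = 4, sync_addr = 20;
  // The sync variable should end up in the meta cell for dst + (20 - 16) = 8,
  // i.e. cell 1, but a cell-by-cell walk starting at the misaligned dst drops
  // it into the cell for address 0 instead.
  std::printf("expected meta cell: %zu, actual meta cell: %zu\n",
              static_cast<size_t>(MetaIdx(dst + (sync_addr - src))),
              static_cast<size_t>(MetaIdx(dst)));
  return 0;
}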
diff --git a/compiler-rt/test/tsan/java_heap_init2.cpp b/compiler-rt/test/tsan/java_heap_init2.cpp
index 2e5724d930e8f..3317a5b3c2f43 100644
--- a/compiler-rt/test/tsan/java_heap_init2.cpp
+++ b/compiler-rt/test/tsan/java_heap_init2.cpp
@@ -1,5 +1,4 @@
// RUN: %clangxx_tsan -O1 %s -o %t && %run %t 2>&1 | FileCheck %s
-// XFAIL: *
#include "java.h"
#include <errno.h>
diff --git a/compiler-rt/test/tsan/munmap_clear_shadow.c b/compiler-rt/test/tsan/munmap_clear_shadow.c
index 8a435a84258f5..1fbc77ec8fd14 100644
--- a/compiler-rt/test/tsan/munmap_clear_shadow.c
+++ b/compiler-rt/test/tsan/munmap_clear_shadow.c
@@ -1,5 +1,9 @@
// RUN: %clang_tsan %s -o %t && %run %t | FileCheck %s
-// XFAIL: *
+
+// On these systems, ReleaseMemoryPagesToOS boils down to madvise(beg, end, MADV_FREE),
+// which only tags the relevant pages as 'FREE' and does not release them immediately.
+// Therefore, we cannot assume the shadow has actually been cleared, and __tsan_read1
+// may still report a race against the stale shadow.
+// UNSUPPORTED: darwin,target={{.*(freebsd|netbsd|solaris|haiku).*}}
#include "test.h"
#include <assert.h>