[compiler-rt] 58c62fd - [sanitizer] Improve accuracy of GetTls on x86/s390

Fangrui Song via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 29 22:14:37 PDT 2021


Author: Fangrui Song
Date: 2021-03-29T22:14:29-07:00
New Revision: 58c62fd9768594ec8dd57e8320ba2396bf8b87e5

URL: https://github.com/llvm/llvm-project/commit/58c62fd9768594ec8dd57e8320ba2396bf8b87e5
DIFF: https://github.com/llvm/llvm-project/commit/58c62fd9768594ec8dd57e8320ba2396bf8b87e5.diff

LOG: [sanitizer] Improve accuracy of GetTls on x86/s390

The previous code may underestimate the static TLS surplus part, which may cause
LeakSanitizer to report false positives if a dynamically loaded module uses the
surplus and there is an allocation only referenced by a thread's TLS.

Added: 
    

Modified: 
    compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
index 18441e4ab1a0e..bd8f5d330b30c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -304,7 +304,7 @@ static int CollectStaticTlsRanges(struct dl_phdr_info *info, size_t size,
   return 0;
 }
 
-static void GetStaticTlsRange(uptr *addr, uptr *size) {
+static void GetStaticTlsRange(uptr *addr, uptr *size, uptr *align) {
   InternalMmapVector<TlsRange> ranges;
   dl_iterate_phdr(CollectStaticTlsRanges, &ranges);
   uptr len = ranges.size();
@@ -318,17 +318,19 @@ static void GetStaticTlsRange(uptr *addr, uptr *size) {
     // This may happen with musl if no module uses PT_TLS.
     *addr = 0;
     *size = 0;
+    *align = 1;
     return;
   }
   // Find the maximum consecutive ranges. We consider two modules consecutive if
   // the gap is smaller than the alignment. The dynamic loader places static TLS
   // blocks this way not to waste space.
   uptr l = one;
+  *align = ranges[l].align;
   while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l - 1].align)
-    --l;
+    *align = Max(*align, ranges[--l].align);
   uptr r = one + 1;
   while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r - 1].align)
-    ++r;
+    *align = Max(*align, ranges[r++].align);
   *addr = ranges[l].begin;
   *size = ranges[r - 1].end - ranges[l].begin;
 }
@@ -406,21 +408,31 @@ static void GetTls(uptr *addr, uptr *size) {
     *size = 0;
   }
 #elif SANITIZER_LINUX
-  GetStaticTlsRange(addr, size);
+  uptr align;
+  GetStaticTlsRange(addr, size, &align);
 #if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
+  if (SANITIZER_GLIBC) {
+#if defined(__s390__)
+    align = Max<uptr>(align, 16);
+#else
+    align = Max<uptr>(align, 64);
+#endif
+  }
+  const uptr tp = RoundUpTo(*addr + *size, align);
+
   // lsan requires the range to additionally cover the static TLS surplus
   // (elf/dl-tls.c defines 1664). Otherwise there may be false positives for
   // allocations only referenced by tls in dynamically loaded modules.
-  if (SANITIZER_GLIBC) {
-    *addr -= 1664;
-    *size += 1664;
-  }
+  if (SANITIZER_GLIBC)
+    *size += 1664;
+
   // Extend the range to include the thread control block. On glibc, lsan needs
   // the range to include pthread::{specific_1stblock,specific} so that
   // allocations only referenced by pthread_setspecific can be scanned. This may
   // underestimate by at most TLS_TCB_ALIGN-1 bytes but it should be fine
   // because the number of bytes after pthread::specific is larger.
-  *size += ThreadDescriptorSize();
+  *addr = tp - RoundUpTo(*size, align);
+  *size = tp - *addr + ThreadDescriptorSize();
 #else
   if (SANITIZER_GLIBC)
     *size += 1664;


        


More information about the llvm-commits mailing list