[compiler-rt] r344329 - [TSan] Cleanup TSan runtime support for Go on linux-aarch64. NFC.
Arnaud A. de Grandmaison via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 12 03:01:10 PDT 2018
Author: aadg
Date: Fri Oct 12 03:01:09 2018
New Revision: 344329
URL: http://llvm.org/viewvc/llvm-project?rev=344329&view=rev
Log:
[TSan] Cleanup TSan runtime support for Go on linux-aarch64. NFC.
This is a follow-up patch to r342541. After further investigation, only a
48-bit VMA size can be supported. As this is enforced in the function
InitializePlatformEarly in lib/tsan/rtl/tsan_platform_linux.cc, the access
to the global variable vmaSize and the switch on it can be removed. This
also addresses a comment from https://reviews.llvm.org/D52167.
A vmaSize of 39 or 42 bits is not compatible with a Go program's memory
layout, as the Go heap will not fit in the shadow memory area.
Patch by: Fangming Fang <Fangming.Fang at arm.com>
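As background for the cleanup above, the 48-bit requirement is checked once at
startup, before the mapping helpers in tsan_platform.h are ever used. Below is a
minimal, self-contained sketch of that kind of check, not the actual runtime
code: DetectVmaSize() is a hypothetical helper and SANITIZER_GO is defined
locally just for the example.

#include <cstdio>
#include <cstdlib>

#define SANITIZER_GO 1  // pretend this translation unit is the Go runtime variant

// Hypothetical stand-in for the runtime's VMA-size detection on linux-aarch64.
static int DetectVmaSize() { return 48; }

static void InitializePlatformEarlySketch() {
  int vmaSize = DetectVmaSize();
#if SANITIZER_GO
  // For Go, only a 48-bit VMA fits the fixed shadow layout, so anything else
  // is rejected up front; the mapping helpers can then assume a single layout.
  if (vmaSize != 48) {
    std::fprintf(stderr, "FATAL: unsupported VMA range: %d (48 required)\n", vmaSize);
    std::exit(1);
  }
#else
  // For C/C++, 39-, 42- and 48-bit VMAs each have their own Mapping struct,
  // which is why the switch on vmaSize remains for that configuration.
  if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
    std::fprintf(stderr, "FATAL: unsupported VMA range: %d\n", vmaSize);
    std::exit(1);
  }
#endif
}

int main() {
  InitializePlatformEarlySketch();
  std::puts("VMA size accepted.");
  return 0;
}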
Modified:
compiler-rt/trunk/lib/tsan/rtl/tsan_platform.h
Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_platform.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_platform.h?rev=344329&r1=344328&r2=344329&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_platform.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_platform.h Fri Oct 12 03:01:09 2018
@@ -473,7 +473,7 @@ struct Mapping47 {
6200 0000 0000 - 8000 0000 0000: -
*/
-struct Mapping48 {
+struct Mapping {
static const uptr kMetaShadowBeg = 0x300000000000ull;
static const uptr kMetaShadowEnd = 0x400000000000ull;
static const uptr kTraceMemBeg = 0x600000000000ull;
@@ -549,12 +549,10 @@ uptr MappingImpl(void) {
template<int Type>
uptr MappingArchImpl(void) {
-#if defined(__aarch64__) && !defined(__APPLE__)
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
switch (vmaSize) {
-#if !SANITIZER_GO
case 39: return MappingImpl<Mapping39, Type>();
case 42: return MappingImpl<Mapping42, Type>();
-#endif
case 48: return MappingImpl<Mapping48, Type>();
}
DCHECK(0);
@@ -708,12 +706,10 @@ bool IsAppMemImpl(uptr mem) {
ALWAYS_INLINE
bool IsAppMem(uptr mem) {
-#if defined(__aarch64__) && !defined(__APPLE__)
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
switch (vmaSize) {
-#if !SANITIZER_GO
case 39: return IsAppMemImpl<Mapping39>(mem);
case 42: return IsAppMemImpl<Mapping42>(mem);
-#endif
case 48: return IsAppMemImpl<Mapping48>(mem);
}
DCHECK(0);
@@ -741,12 +737,10 @@ bool IsShadowMemImpl(uptr mem) {
ALWAYS_INLINE
bool IsShadowMem(uptr mem) {
-#if defined(__aarch64__) && !defined(__APPLE__)
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
switch (vmaSize) {
-#if !SANITIZER_GO
case 39: return IsShadowMemImpl<Mapping39>(mem);
case 42: return IsShadowMemImpl<Mapping42>(mem);
-#endif
case 48: return IsShadowMemImpl<Mapping48>(mem);
}
DCHECK(0);
@@ -774,12 +768,10 @@ bool IsMetaMemImpl(uptr mem) {
ALWAYS_INLINE
bool IsMetaMem(uptr mem) {
-#if defined(__aarch64__) && !defined(__APPLE__)
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
switch (vmaSize) {
-#if !SANITIZER_GO
case 39: return IsMetaMemImpl<Mapping39>(mem);
case 42: return IsMetaMemImpl<Mapping42>(mem);
-#endif
case 48: return IsMetaMemImpl<Mapping48>(mem);
}
DCHECK(0);
@@ -817,12 +809,10 @@ uptr MemToShadowImpl(uptr x) {
ALWAYS_INLINE
uptr MemToShadow(uptr x) {
-#if defined(__aarch64__) && !defined(__APPLE__)
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
switch (vmaSize) {
-#if !SANITIZER_GO
case 39: return MemToShadowImpl<Mapping39>(x);
case 42: return MemToShadowImpl<Mapping42>(x);
-#endif
case 48: return MemToShadowImpl<Mapping48>(x);
}
DCHECK(0);
@@ -862,12 +852,10 @@ u32 *MemToMetaImpl(uptr x) {
ALWAYS_INLINE
u32 *MemToMeta(uptr x) {
-#if defined(__aarch64__) && !defined(__APPLE__)
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
switch (vmaSize) {
-#if !SANITIZER_GO
case 39: return MemToMetaImpl<Mapping39>(x);
case 42: return MemToMetaImpl<Mapping42>(x);
-#endif
case 48: return MemToMetaImpl<Mapping48>(x);
}
DCHECK(0);
@@ -920,12 +908,10 @@ uptr ShadowToMemImpl(uptr s) {
ALWAYS_INLINE
uptr ShadowToMem(uptr s) {
-#if defined(__aarch64__) && !defined(__APPLE__)
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
switch (vmaSize) {
-#if !SANITIZER_GO
case 39: return ShadowToMemImpl<Mapping39>(s);
case 42: return ShadowToMemImpl<Mapping42>(s);
-#endif
case 48: return ShadowToMemImpl<Mapping48>(s);
}
DCHECK(0);
@@ -961,12 +947,10 @@ uptr GetThreadTraceImpl(int tid) {
ALWAYS_INLINE
uptr GetThreadTrace(int tid) {
-#if defined(__aarch64__) && !defined(__APPLE__)
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
switch (vmaSize) {
-#if !SANITIZER_GO
case 39: return GetThreadTraceImpl<Mapping39>(tid);
case 42: return GetThreadTraceImpl<Mapping42>(tid);
-#endif
case 48: return GetThreadTraceImpl<Mapping48>(tid);
}
DCHECK(0);
@@ -997,12 +981,10 @@ uptr GetThreadTraceHeaderImpl(int tid) {
ALWAYS_INLINE
uptr GetThreadTraceHeader(int tid) {
-#if defined(__aarch64__) && !defined(__APPLE__)
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
switch (vmaSize) {
-#if !SANITIZER_GO
case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid);
case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid);
-#endif
case 48: return GetThreadTraceHeaderImpl<Mapping48>(tid);
}
DCHECK(0);
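The net effect for the aarch64 Go build is that none of the vmaSize switches
above are compiled at all: that configuration falls through to the generic
single-Mapping path, which now resolves to the renamed Mapping struct (formerly
Mapping48). A simplified, illustrative sketch of that shape, with trivial
stand-in types rather than the literal header contents:

#include <cstdint>
using uptr = uintptr_t;

// Formerly Mapping48; the only layout used for Go on linux-aarch64.
struct Mapping {
  static const uptr kMetaShadowBeg = 0x300000000000ull;
};

// Trivial stand-in for the real per-mapping implementation.
template <typename M, int Type>
uptr MappingImpl() { return M::kMetaShadowBeg; }

template <int Type>
uptr MappingArchImpl() {
  // With SANITIZER_GO defined, the vmaSize switch is preprocessed out,
  // so the generic path below is taken directly.
  return MappingImpl<Mapping, Type>();
}

int main() { return MappingArchImpl<0>() == 0x300000000000ull ? 0 : 1; }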