[compiler-rt] 22a251c - tsan: remove hacky call

Dmitry Vyukov via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 21 10:53:54 PST 2021


Author: Dmitry Vyukov
Date: 2021-12-21T19:53:49+01:00
New Revision: 22a251c3d0d31e25fd26a06de8ec1df30a5e1dc7

URL: https://github.com/llvm/llvm-project/commit/22a251c3d0d31e25fd26a06de8ec1df30a5e1dc7
DIFF: https://github.com/llvm/llvm-project/commit/22a251c3d0d31e25fd26a06de8ec1df30a5e1dc7.diff

LOG: tsan: remove hacky call

It's unused in the new tsan runtime.

Depends on D112603.

Reviewed By: vitalybuka, melver

Differential Revision: https://reviews.llvm.org/D112604

Added: 
    

Modified: 
    compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
    compiler-rt/lib/tsan/rtl/tsan_rtl.h
    compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index 17fba0c584a1..68c0ec7bd437 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -974,12 +974,6 @@ void TraceSwitchPartImpl(ThreadState* thr) {
           atomic_load_relaxed(&thr->trace_pos));
 }
 
-#if !SANITIZER_GO
-extern "C" void __tsan_trace_switch() {}
-
-extern "C" void __tsan_report_race() {}
-#endif
-
 void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
   DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
   thr->ignore_reads_and_writes++;

diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index 013d6910de78..db8696485b14 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -563,27 +563,6 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
 void AfterSleep(ThreadState *thr, uptr pc);
 void IncrementEpoch(ThreadState *thr);
 
-// The hacky call uses custom calling convention and an assembly thunk.
-// It is considerably faster than a normal call for the caller
-// if it is not executed (it is intended for slow paths from hot functions).
-// The trick is that the call preserves all registers and the compiler
-// does not treat it as a call.
-// If it does not work for you, use normal call.
-#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
-// The caller may not create the stack frame for itself at all,
-// so we create a reserve stack frame for it (1024b must be enough).
-#define HACKY_CALL(f) \
-  __asm__ __volatile__("sub $1024, %%rsp;" \
-                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
-                       ".hidden " #f "_thunk;" \
-                       "call " #f "_thunk;" \
-                       "add $1024, %%rsp;" \
-                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
-                       ::: "memory", "cc");
-#else
-#define HACKY_CALL(f) f()
-#endif
-
 #if !SANITIZER_GO
 uptr ALWAYS_INLINE HeapEnd() {
   return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
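
The comment and macro deleted above describe the old mechanism: the call site jumped
through an assembly thunk (the *_thunk symbols removed from tsan_rtl_amd64.S below)
that saved and restored every caller-saved register, so the compiler did not have to
treat the expansion as a call and could keep values in registers across it. With the
macro gone, a cold slow path in a hot function is reached with an ordinary call. A
minimal standalone sketch of that plain-call shape; the names TraceAddEvent and
TraceSwitchSlow are hypothetical stand-ins, not the runtime's actual code:

    // Sketch only: an ordinary, out-of-line call on the rarely taken slow path.
    #include <cstdio>

    namespace {

    // Stand-in for a slow path such as trace-part switching; 'cold' and
    // 'noinline' keep it out of the hot function's body.
    __attribute__((cold, noinline)) void TraceSwitchSlow() {
      std::puts("switching to the next trace part");
    }

    // Stand-in for a hot, always-inlined instrumentation function.
    inline void TraceAddEvent(unsigned pos, unsigned part_size) {
      if (__builtin_expect(pos % part_size == 0, 0))
        TraceSwitchSlow();  // plain call; this is where HACKY_CALL would have gone
      // ... record the event at 'pos' ...
    }

    }  // namespace

    int main() {
      for (unsigned i = 1; i <= 8; i++)
        TraceAddEvent(i, 4);
    }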

diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S b/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
index 632b19d18158..f848be9dd46c 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
@@ -9,242 +9,6 @@
 .section __TEXT,__text
 #endif
 
-ASM_HIDDEN(__tsan_trace_switch)
-.globl ASM_SYMBOL(__tsan_trace_switch_thunk)
-ASM_SYMBOL(__tsan_trace_switch_thunk):
-  CFI_STARTPROC
-  _CET_ENDBR
-  # Save scratch registers.
-  push %rax
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rax, 0)
-  push %rcx
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rcx, 0)
-  push %rdx
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rdx, 0)
-  push %rsi
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rsi, 0)
-  push %rdi
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rdi, 0)
-  push %r8
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r8, 0)
-  push %r9
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r9, 0)
-  push %r10
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r10, 0)
-  push %r11
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r11, 0)
-  # All XMM registers are caller-saved.
-  sub $0x100, %rsp
-  CFI_ADJUST_CFA_OFFSET(0x100)
-  vmovdqu %xmm0, 0x0(%rsp)
-  vmovdqu %xmm1, 0x10(%rsp)
-  vmovdqu %xmm2, 0x20(%rsp)
-  vmovdqu %xmm3, 0x30(%rsp)
-  vmovdqu %xmm4, 0x40(%rsp)
-  vmovdqu %xmm5, 0x50(%rsp)
-  vmovdqu %xmm6, 0x60(%rsp)
-  vmovdqu %xmm7, 0x70(%rsp)
-  vmovdqu %xmm8, 0x80(%rsp)
-  vmovdqu %xmm9, 0x90(%rsp)
-  vmovdqu %xmm10, 0xa0(%rsp)
-  vmovdqu %xmm11, 0xb0(%rsp)
-  vmovdqu %xmm12, 0xc0(%rsp)
-  vmovdqu %xmm13, 0xd0(%rsp)
-  vmovdqu %xmm14, 0xe0(%rsp)
-  vmovdqu %xmm15, 0xf0(%rsp)
-  # Align stack frame.
-  push %rbx  # non-scratch
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rbx, 0)
-  mov %rsp, %rbx  # save current rsp
-  CFI_DEF_CFA_REGISTER(%rbx)
-  shr $4, %rsp  # clear 4 lsb, align to 16
-  shl $4, %rsp
-
-  call ASM_SYMBOL(__tsan_trace_switch)
-
-  # Unalign stack frame back.
-  mov %rbx, %rsp  # restore the original rsp
-  CFI_DEF_CFA_REGISTER(%rsp)
-  pop %rbx
-  CFI_ADJUST_CFA_OFFSET(-8)
-  # Restore scratch registers.
-  vmovdqu 0x0(%rsp), %xmm0
-  vmovdqu 0x10(%rsp), %xmm1
-  vmovdqu 0x20(%rsp), %xmm2
-  vmovdqu 0x30(%rsp), %xmm3
-  vmovdqu 0x40(%rsp), %xmm4
-  vmovdqu 0x50(%rsp), %xmm5
-  vmovdqu 0x60(%rsp), %xmm6
-  vmovdqu 0x70(%rsp), %xmm7
-  vmovdqu 0x80(%rsp), %xmm8
-  vmovdqu 0x90(%rsp), %xmm9
-  vmovdqu 0xa0(%rsp), %xmm10
-  vmovdqu 0xb0(%rsp), %xmm11
-  vmovdqu 0xc0(%rsp), %xmm12
-  vmovdqu 0xd0(%rsp), %xmm13
-  vmovdqu 0xe0(%rsp), %xmm14
-  vmovdqu 0xf0(%rsp), %xmm15
-  add $0x100, %rsp
-  CFI_ADJUST_CFA_OFFSET(-0x100)
-  pop %r11
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %r10
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %r9
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %r8
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rdi
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rsi
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rdx
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rcx
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rax
-  CFI_ADJUST_CFA_OFFSET(-8)
-  CFI_RESTORE(%rax)
-  CFI_RESTORE(%rbx)
-  CFI_RESTORE(%rcx)
-  CFI_RESTORE(%rdx)
-  CFI_RESTORE(%rsi)
-  CFI_RESTORE(%rdi)
-  CFI_RESTORE(%r8)
-  CFI_RESTORE(%r9)
-  CFI_RESTORE(%r10)
-  CFI_RESTORE(%r11)
-  ret
-  CFI_ENDPROC
-
-ASM_HIDDEN(__tsan_report_race)
-.globl ASM_SYMBOL(__tsan_report_race_thunk)
-ASM_SYMBOL(__tsan_report_race_thunk):
-  CFI_STARTPROC
-  _CET_ENDBR
-  # Save scratch registers.
-  push %rax
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rax, 0)
-  push %rcx
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rcx, 0)
-  push %rdx
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rdx, 0)
-  push %rsi
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rsi, 0)
-  push %rdi
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rdi, 0)
-  push %r8
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r8, 0)
-  push %r9
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r9, 0)
-  push %r10
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r10, 0)
-  push %r11
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r11, 0)
-  # All XMM registers are caller-saved.
-  sub $0x100, %rsp
-  CFI_ADJUST_CFA_OFFSET(0x100)
-  vmovdqu %xmm0, 0x0(%rsp)
-  vmovdqu %xmm1, 0x10(%rsp)
-  vmovdqu %xmm2, 0x20(%rsp)
-  vmovdqu %xmm3, 0x30(%rsp)
-  vmovdqu %xmm4, 0x40(%rsp)
-  vmovdqu %xmm5, 0x50(%rsp)
-  vmovdqu %xmm6, 0x60(%rsp)
-  vmovdqu %xmm7, 0x70(%rsp)
-  vmovdqu %xmm8, 0x80(%rsp)
-  vmovdqu %xmm9, 0x90(%rsp)
-  vmovdqu %xmm10, 0xa0(%rsp)
-  vmovdqu %xmm11, 0xb0(%rsp)
-  vmovdqu %xmm12, 0xc0(%rsp)
-  vmovdqu %xmm13, 0xd0(%rsp)
-  vmovdqu %xmm14, 0xe0(%rsp)
-  vmovdqu %xmm15, 0xf0(%rsp)
-  # Align stack frame.
-  push %rbx  # non-scratch
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rbx, 0)
-  mov %rsp, %rbx  # save current rsp
-  CFI_DEF_CFA_REGISTER(%rbx)
-  shr $4, %rsp  # clear 4 lsb, align to 16
-  shl $4, %rsp
-
-  call ASM_SYMBOL(__tsan_report_race)
-
-  # Unalign stack frame back.
-  mov %rbx, %rsp  # restore the original rsp
-  CFI_DEF_CFA_REGISTER(%rsp)
-  pop %rbx
-  CFI_ADJUST_CFA_OFFSET(-8)
-  # Restore scratch registers.
-  vmovdqu 0x0(%rsp), %xmm0
-  vmovdqu 0x10(%rsp), %xmm1
-  vmovdqu 0x20(%rsp), %xmm2
-  vmovdqu 0x30(%rsp), %xmm3
-  vmovdqu 0x40(%rsp), %xmm4
-  vmovdqu 0x50(%rsp), %xmm5
-  vmovdqu 0x60(%rsp), %xmm6
-  vmovdqu 0x70(%rsp), %xmm7
-  vmovdqu 0x80(%rsp), %xmm8
-  vmovdqu 0x90(%rsp), %xmm9
-  vmovdqu 0xa0(%rsp), %xmm10
-  vmovdqu 0xb0(%rsp), %xmm11
-  vmovdqu 0xc0(%rsp), %xmm12
-  vmovdqu 0xd0(%rsp), %xmm13
-  vmovdqu 0xe0(%rsp), %xmm14
-  vmovdqu 0xf0(%rsp), %xmm15
-  add $0x100, %rsp
-  CFI_ADJUST_CFA_OFFSET(-0x100)
-  pop %r11
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %r10
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %r9
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %r8
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rdi
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rsi
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rdx
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rcx
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rax
-  CFI_ADJUST_CFA_OFFSET(-8)
-  CFI_RESTORE(%rax)
-  CFI_RESTORE(%rbx)
-  CFI_RESTORE(%rcx)
-  CFI_RESTORE(%rdx)
-  CFI_RESTORE(%rsi)
-  CFI_RESTORE(%rdi)
-  CFI_RESTORE(%r8)
-  CFI_RESTORE(%r9)
-  CFI_RESTORE(%r10)
-  CFI_RESTORE(%r11)
-  ret
-  CFI_ENDPROC
-
 ASM_HIDDEN(__tsan_setjmp)
 #if defined(__NetBSD__)
 .comm _ZN14__interception15real___setjmp14E,8,8
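
One non-obvious detail in the deleted thunks: before calling back into C++ they
realign the stack, because the hacky call sequence gives no alignment guarantee. The
shr $4 / shl $4 pair rounds %rsp down to a 16-byte boundary (the alignment the SysV
x86-64 ABI requires of %rsp at the point of a call), while the exact original value
is parked in the callee-saved %rbx and restored afterwards. A tiny standalone sketch
of that arithmetic, using a made-up pointer value:

    // Sketch only: the rounding performed by "shr $4, %rsp; shl $4, %rsp".
    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      std::uintptr_t rsp = 0x7ffd12345679u;      // hypothetical, unaligned stack pointer
      std::uintptr_t aligned = (rsp >> 4) << 4;  // clear the low 4 bits
      std::printf("%#" PRIxPTR " -> %#" PRIxPTR "\n", rsp, aligned);
      // Same result as masking with ~15: rounds down to a 16-byte boundary.
      return aligned == (rsp & ~static_cast<std::uintptr_t>(15)) ? 0 : 1;
    }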


        

