[libcxxabi] r193840 - Swap contents of UnwindRegistersRestore.s and UnwindRegistersSave.s

Nick Kledzik kledzik at apple.com
Thu Oct 31 17:37:20 PDT 2013


Author: kledzik
Date: Thu Oct 31 19:37:20 2013
New Revision: 193840

URL: http://llvm.org/viewvc/llvm-project?rev=193840&view=rev
Log:
Swap contents of UnwindRegistersRestore.s and UnwindRegistersSave.s

The two files held each other's contents: the save routine (unw_getcontext)
was in UnwindRegistersRestore.s and the restore routines
(Registers_*::jumpto) were in UnwindRegistersSave.s. This moves each
routine into the correctly named file; the code itself is unchanged.

Modified:
    libcxxabi/trunk/src/Unwind/UnwindRegistersRestore.s
    libcxxabi/trunk/src/Unwind/UnwindRegistersSave.s
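
For context: these routines are the two halves of libunwind's context
save/restore. unw_getcontext() snapshots the caller's registers into an
unw_context_t, and the Registers_*::jumpto() methods reload a saved
register set and jump to its saved IP. Below is a minimal sketch of
driving the save half from C through the public libunwind API (the helper
function name is mine, and error handling is elided; assumes the
<libunwind.h> header from this source tree):

  #include <libunwind.h>
  #include <stdio.h>

  static void print_backtrace(void) {
    unw_context_t context;   /* filled in by the assembly unw_getcontext */
    unw_cursor_t cursor;
    unw_word_t ip;

    unw_getcontext(&context);           /* save half (UnwindRegistersSave.s) */
    unw_init_local(&cursor, &context);  /* start unwinding at the snapshot */
    while (unw_step(&cursor) > 0) {     /* walk the caller frames */
      unw_get_reg(&cursor, UNW_REG_IP, &ip);
      printf("ip: 0x%llx\n", (unsigned long long)ip);
    }
  }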

Modified: libcxxabi/trunk/src/Unwind/UnwindRegistersRestore.s
URL: http://llvm.org/viewvc/llvm-project/libcxxabi/trunk/src/Unwind/UnwindRegistersRestore.s?rev=193840&r1=193839&r2=193840&view=diff
==============================================================================
--- libcxxabi/trunk/src/Unwind/UnwindRegistersRestore.s (original)
+++ libcxxabi/trunk/src/Unwind/UnwindRegistersRestore.s Thu Oct 31 19:37:20 2013
@@ -8,279 +8,316 @@
 //===----------------------------------------------------------------------===//
 
 
-    .text
-
 #if __i386__
-
+  .text
+  .globl __ZN9libunwind13Registers_x866jumptoEv
+  .private_extern __ZN9libunwind13Registers_x866jumptoEv
+__ZN9libunwind13Registers_x866jumptoEv:
 #
-# extern int unw_getcontext(unw_context_t* thread_state)
+# void libunwind::Registers_x86::jumpto()
 #
 # On entry:
-#   +                       +
-#   +-----------------------+
-#   + thread_state pointer  +
-#   +-----------------------+
-#   + return address        +
-#   +-----------------------+   <-- SP
-#   +                       +
-#
-  .globl _unw_getcontext
-_unw_getcontext:
-  push  %eax
-  movl  8(%esp), %eax
-  movl  %ebx,  4(%eax)
-  movl  %ecx,  8(%eax)
-  movl  %edx, 12(%eax)
-  movl  %edi, 16(%eax)
-  movl  %esi, 20(%eax)
-  movl  %ebp, 24(%eax)
-  movl  %esp, %edx
-  addl  $8, %edx
-  movl  %edx, 28(%eax)  # store what sp was at call site as esp
+#  +                       +
+#  +-----------------------+
+#  + thread_state pointer  +
+#  +-----------------------+
+#  + return address        +
+#  +-----------------------+   <-- SP
+#  +                       +
+  movl   4(%esp), %eax
+  # set up eax and ret on new stack location
+  movl  28(%eax), %edx # edx holds new stack pointer
+  subl  $8,%edx
+  movl  %edx, 28(%eax)
+  movl  0(%eax), %ebx
+  movl  %ebx, 0(%edx)
+  movl  40(%eax), %ebx
+  movl  %ebx, 4(%edx)
+  # we now have ret and eax pushed onto where new stack will be
+  # restore all registers
+  movl   4(%eax), %ebx
+  movl   8(%eax), %ecx
+  movl  12(%eax), %edx
+  movl  16(%eax), %edi
+  movl  20(%eax), %esi
+  movl  24(%eax), %ebp
+  movl  28(%eax), %esp
   # skip ss
   # skip eflags
-  movl  4(%esp), %edx
-  movl  %edx, 40(%eax)  # store return address as eip
+  pop    %eax  # eax was already pushed on new stack
+  ret        # eip was already pushed on new stack
   # skip cs
   # skip ds
   # skip es
   # skip fs
   # skip gs
-  movl  (%esp), %edx
-  movl  %edx, (%eax)  # store original eax
-  popl  %eax
-  xorl  %eax, %eax    # return UNW_ESUCCESS
-  ret
 
 #elif __x86_64__
 
+  .text
+  .globl __ZN9libunwind16Registers_x86_646jumptoEv
+  .private_extern __ZN9libunwind16Registers_x86_646jumptoEv
+__ZN9libunwind16Registers_x86_646jumptoEv:
 #
-# extern int unw_getcontext(unw_context_t* thread_state)
+# void libunwind::Registers_x86_64::jumpto()
 #
-# On entry:
-#  thread_state pointer is in rdi
-#
-  .globl _unw_getcontext
-_unw_getcontext:
-  movq  %rax,   (%rdi)
-  movq  %rbx,  8(%rdi)
-  movq  %rcx, 16(%rdi)
-  movq  %rdx, 24(%rdi)
-  movq  %rdi, 32(%rdi)
-  movq  %rsi, 40(%rdi)
-  movq  %rbp, 48(%rdi)
-  movq  %rsp, 56(%rdi)
-  addq  $8,   56(%rdi)
-  movq  %r8,  64(%rdi)
-  movq  %r9,  72(%rdi)
-  movq  %r10, 80(%rdi)
-  movq  %r11, 88(%rdi)
-  movq  %r12, 96(%rdi)
-  movq  %r13,104(%rdi)
-  movq  %r14,112(%rdi)
-  movq  %r15,120(%rdi)
-  movq  (%rsp),%rsi
-  movq  %rsi,128(%rdi) # store return address as rip
+# On entry, thread_state pointer is in rdi
+
+  movq  56(%rdi), %rax # rax holds new stack pointer
+  subq  $16, %rax
+  movq  %rax, 56(%rdi)
+  movq  32(%rdi), %rbx  # store new rdi on new stack
+  movq  %rbx, 0(%rax)
+  movq  128(%rdi), %rbx # store new rip on new stack
+  movq  %rbx, 8(%rax)
+  # restore all registers
+  movq    0(%rdi), %rax
+  movq    8(%rdi), %rbx
+  movq   16(%rdi), %rcx
+  movq   24(%rdi), %rdx
+  # restore rdi later
+  movq   40(%rdi), %rsi
+  movq   48(%rdi), %rbp
+  # restore rsp later
+  movq   64(%rdi), %r8
+  movq   72(%rdi), %r9
+  movq   80(%rdi), %r10
+  movq   88(%rdi), %r11
+  movq   96(%rdi), %r12
+  movq  104(%rdi), %r13
+  movq  112(%rdi), %r14
+  movq  120(%rdi), %r15
   # skip rflags
   # skip cs
   # skip fs
   # skip gs
-  xorl  %eax, %eax    # return UNW_ESUCCESS
-  ret
+  movq  56(%rdi), %rsp  # cut back rsp to new location
+  pop    %rdi      # rdi was saved here earlier
+  ret            # rip was saved here
+
 
 #elif __ppc__
 
+  .text
+  .globl __ZN9libunwind13Registers_ppc6jumptoEv
+  .private_extern __ZN9libunwind13Registers_ppc6jumptoEv
+__ZN9libunwind13Registers_ppc6jumptoEv:
 ;
-; extern int unw_getcontext(unw_context_t* thread_state)
+; void libunwind::Registers_ppc::jumpto()
 ;
 ; On entry:
 ;  thread_state pointer is in r3
 ;
-  .globl _unw_getcontext
-_unw_getcontext:
-  stw    r0,  8(r3)
-  mflr  r0
-  stw    r0,  0(r3)  ; store lr as ssr0
-  stw    r1, 12(r3)
-  stw    r2, 16(r3)
-  stw    r3, 20(r3)
-  stw    r4, 24(r3)
-  stw    r5, 28(r3)
-  stw    r6, 32(r3)
-  stw    r7, 36(r3)
-  stw    r8, 40(r3)
-  stw    r9, 44(r3)
-  stw     r10, 48(r3)
-  stw     r11, 52(r3)
-  stw     r12, 56(r3)
-  stw     r13, 60(r3)
-  stw     r14, 64(r3)
-  stw     r15, 68(r3)
-  stw     r16, 72(r3)
-  stw     r17, 76(r3)
-  stw     r18, 80(r3)
-  stw     r19, 84(r3)
-  stw     r20, 88(r3)
-  stw     r21, 92(r3)
-  stw     r22, 96(r3)
-  stw     r23,100(r3)
-  stw     r24,104(r3)
-  stw     r25,108(r3)
-  stw     r26,112(r3)
-  stw     r27,116(r3)
-  stw     r28,120(r3)
-  stw     r29,124(r3)
-  stw     r30,128(r3)
-  stw     r31,132(r3)
-
-  ; save VRSave register
-  mfspr  r0,256
-  stw    r0,156(r3)
-  ; save CR registers
-  mfcr  r0
-  stw    r0,136(r3)
-  ; save CTR register
-  mfctr  r0
-  stw    r0,148(r3)
-
-  ; save float registers
-  stfd    f0, 160(r3)
-  stfd    f1, 168(r3)
-  stfd    f2, 176(r3)
-  stfd    f3, 184(r3)
-  stfd    f4, 192(r3)
-  stfd    f5, 200(r3)
-  stfd    f6, 208(r3)
-  stfd    f7, 216(r3)
-  stfd    f8, 224(r3)
-  stfd    f9, 232(r3)
-  stfd    f10,240(r3)
-  stfd    f11,248(r3)
-  stfd    f12,256(r3)
-  stfd    f13,264(r3)
-  stfd    f14,272(r3)
-  stfd    f15,280(r3)
-  stfd    f16,288(r3)
-  stfd    f17,296(r3)
-  stfd    f18,304(r3)
-  stfd    f19,312(r3)
-  stfd    f20,320(r3)
-  stfd    f21,328(r3)
-  stfd    f22,336(r3)
-  stfd    f23,344(r3)
-  stfd    f24,352(r3)
-  stfd    f25,360(r3)
-  stfd    f26,368(r3)
-  stfd    f27,376(r3)
-  stfd    f28,384(r3)
-  stfd    f29,392(r3)
-  stfd    f30,400(r3)
-  stfd    f31,408(r3)
 
-
-  ; save vector registers
+  ; restore integral registers
+  ; skip r0 for now
+  ; skip r1 for now
+  lwz     r2, 16(r3)
+  ; skip r3 for now
+  ; skip r4 for now
+  ; skip r5 for now
+  lwz     r6, 32(r3)
+  lwz     r7, 36(r3)
+  lwz     r8, 40(r3)
+  lwz     r9, 44(r3)
+  lwz    r10, 48(r3)
+  lwz    r11, 52(r3)
+  lwz    r12, 56(r3)
+  lwz    r13, 60(r3)
+  lwz    r14, 64(r3)
+  lwz    r15, 68(r3)
+  lwz    r16, 72(r3)
+  lwz    r17, 76(r3)
+  lwz    r18, 80(r3)
+  lwz    r19, 84(r3)
+  lwz    r20, 88(r3)
+  lwz    r21, 92(r3)
+  lwz    r22, 96(r3)
+  lwz    r23,100(r3)
+  lwz    r24,104(r3)
+  lwz    r25,108(r3)
+  lwz    r26,112(r3)
+  lwz    r27,116(r3)
+  lwz    r28,120(r3)
+  lwz    r29,124(r3)
+  lwz    r30,128(r3)
+  lwz    r31,132(r3)
+
+  ; restore float registers
+  lfd    f0, 160(r3)
+  lfd    f1, 168(r3)
+  lfd    f2, 176(r3)
+  lfd    f3, 184(r3)
+  lfd    f4, 192(r3)
+  lfd    f5, 200(r3)
+  lfd    f6, 208(r3)
+  lfd    f7, 216(r3)
+  lfd    f8, 224(r3)
+  lfd    f9, 232(r3)
+  lfd    f10,240(r3)
+  lfd    f11,248(r3)
+  lfd    f12,256(r3)
+  lfd    f13,264(r3)
+  lfd    f14,272(r3)
+  lfd    f15,280(r3)
+  lfd    f16,288(r3)
+  lfd    f17,296(r3)
+  lfd    f18,304(r3)
+  lfd    f19,312(r3)
+  lfd    f20,320(r3)
+  lfd    f21,328(r3)
+  lfd    f22,336(r3)
+  lfd    f23,344(r3)
+  lfd    f24,352(r3)
+  lfd    f25,360(r3)
+  lfd    f26,368(r3)
+  lfd    f27,376(r3)
+  lfd    f28,384(r3)
+  lfd    f29,392(r3)
+  lfd    f30,400(r3)
+  lfd    f31,408(r3)
+
+  ; restore vector registers if any are in use
+  lwz    r5,156(r3)  ; test VRSave
+  cmpwi  r5,0
+  beq    Lnovec
 
   subi  r4,r1,16
   rlwinm  r4,r4,0,0,27  ; mask low 4-bits
   ; r4 is now a 16-byte aligned pointer into the red zone
+  ; the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
 
-#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
-  stvx  _vec,0,r4           @\
-  lwz    r5, 0(r4)          @\
-  stw    r5, _offset(r3)    @\
-  lwz    r5, 4(r4)          @\
-  stw    r5, _offset+4(r3)  @\
-  lwz    r5, 8(r4)          @\
-  stw    r5, _offset+8(r3)  @\
-  lwz    r5, 12(r4)         @\
-  stw    r5, _offset+12(r3)
-
-  SAVE_VECTOR_UNALIGNED( v0, 424+0x000)
-  SAVE_VECTOR_UNALIGNED( v1, 424+0x010)
-  SAVE_VECTOR_UNALIGNED( v2, 424+0x020)
-  SAVE_VECTOR_UNALIGNED( v3, 424+0x030)
-  SAVE_VECTOR_UNALIGNED( v4, 424+0x040)
-  SAVE_VECTOR_UNALIGNED( v5, 424+0x050)
-  SAVE_VECTOR_UNALIGNED( v6, 424+0x060)
-  SAVE_VECTOR_UNALIGNED( v7, 424+0x070)
-  SAVE_VECTOR_UNALIGNED( v8, 424+0x080)
-  SAVE_VECTOR_UNALIGNED( v9, 424+0x090)
-  SAVE_VECTOR_UNALIGNED(v10, 424+0x0A0)
-  SAVE_VECTOR_UNALIGNED(v11, 424+0x0B0)
-  SAVE_VECTOR_UNALIGNED(v12, 424+0x0C0)
-  SAVE_VECTOR_UNALIGNED(v13, 424+0x0D0)
-  SAVE_VECTOR_UNALIGNED(v14, 424+0x0E0)
-  SAVE_VECTOR_UNALIGNED(v15, 424+0x0F0)
-  SAVE_VECTOR_UNALIGNED(v16, 424+0x100)
-  SAVE_VECTOR_UNALIGNED(v17, 424+0x110)
-  SAVE_VECTOR_UNALIGNED(v18, 424+0x120)
-  SAVE_VECTOR_UNALIGNED(v19, 424+0x130)
-  SAVE_VECTOR_UNALIGNED(v20, 424+0x140)
-  SAVE_VECTOR_UNALIGNED(v21, 424+0x150)
-  SAVE_VECTOR_UNALIGNED(v22, 424+0x160)
-  SAVE_VECTOR_UNALIGNED(v23, 424+0x170)
-  SAVE_VECTOR_UNALIGNED(v24, 424+0x180)
-  SAVE_VECTOR_UNALIGNED(v25, 424+0x190)
-  SAVE_VECTOR_UNALIGNED(v26, 424+0x1A0)
-  SAVE_VECTOR_UNALIGNED(v27, 424+0x1B0)
-  SAVE_VECTOR_UNALIGNED(v28, 424+0x1C0)
-  SAVE_VECTOR_UNALIGNED(v29, 424+0x1D0)
-  SAVE_VECTOR_UNALIGNED(v30, 424+0x1E0)
-  SAVE_VECTOR_UNALIGNED(v31, 424+0x1F0)
-
-  li  r3, 0    ; return UNW_ESUCCESS
-  blr
 
+#define LOAD_VECTOR_UNALIGNEDl(_index) \
+  andis.  r0,r5,(1<<(15-_index))  @\
+  beq    Ldone  ## _index     @\
+  lwz    r0, 424+_index*16(r3)  @\
+  stw    r0, 0(r4)        @\
+  lwz    r0, 424+_index*16+4(r3)  @\
+  stw    r0, 4(r4)        @\
+  lwz    r0, 424+_index*16+8(r3)  @\
+  stw    r0, 8(r4)        @\
+  lwz    r0, 424+_index*16+12(r3)@\
+  stw    r0, 12(r4)        @\
+  lvx    v ## _index,0,r4    @\
+Ldone  ## _index:
+
+#define LOAD_VECTOR_UNALIGNEDh(_index) \
+  andi.  r0,r5,(1<<(31-_index))  @\
+  beq    Ldone  ## _index    @\
+  lwz    r0, 424+_index*16(r3)  @\
+  stw    r0, 0(r4)        @\
+  lwz    r0, 424+_index*16+4(r3)  @\
+  stw    r0, 4(r4)        @\
+  lwz    r0, 424+_index*16+8(r3)  @\
+  stw    r0, 8(r4)        @\
+  lwz    r0, 424+_index*16+12(r3)@\
+  stw    r0, 12(r4)        @\
+  lvx    v ## _index,0,r4    @\
+  Ldone  ## _index:
+
+
+  LOAD_VECTOR_UNALIGNEDl(0)
+  LOAD_VECTOR_UNALIGNEDl(1)
+  LOAD_VECTOR_UNALIGNEDl(2)
+  LOAD_VECTOR_UNALIGNEDl(3)
+  LOAD_VECTOR_UNALIGNEDl(4)
+  LOAD_VECTOR_UNALIGNEDl(5)
+  LOAD_VECTOR_UNALIGNEDl(6)
+  LOAD_VECTOR_UNALIGNEDl(7)
+  LOAD_VECTOR_UNALIGNEDl(8)
+  LOAD_VECTOR_UNALIGNEDl(9)
+  LOAD_VECTOR_UNALIGNEDl(10)
+  LOAD_VECTOR_UNALIGNEDl(11)
+  LOAD_VECTOR_UNALIGNEDl(12)
+  LOAD_VECTOR_UNALIGNEDl(13)
+  LOAD_VECTOR_UNALIGNEDl(14)
+  LOAD_VECTOR_UNALIGNEDl(15)
+  LOAD_VECTOR_UNALIGNEDh(16)
+  LOAD_VECTOR_UNALIGNEDh(17)
+  LOAD_VECTOR_UNALIGNEDh(18)
+  LOAD_VECTOR_UNALIGNEDh(19)
+  LOAD_VECTOR_UNALIGNEDh(20)
+  LOAD_VECTOR_UNALIGNEDh(21)
+  LOAD_VECTOR_UNALIGNEDh(22)
+  LOAD_VECTOR_UNALIGNEDh(23)
+  LOAD_VECTOR_UNALIGNEDh(24)
+  LOAD_VECTOR_UNALIGNEDh(25)
+  LOAD_VECTOR_UNALIGNEDh(26)
+  LOAD_VECTOR_UNALIGNEDh(27)
+  LOAD_VECTOR_UNALIGNEDh(28)
+  LOAD_VECTOR_UNALIGNEDh(29)
+  LOAD_VECTOR_UNALIGNEDh(30)
+  LOAD_VECTOR_UNALIGNEDh(31)
+
+Lnovec:
+  lwz    r0, 136(r3) ; __cr
+  mtocrf  255,r0
+  lwz    r0, 148(r3) ; __ctr
+  mtctr  r0
+  lwz    r0, 0(r3)  ; __ssr0
+  mtctr  r0
+  lwz    r0, 8(r3)  ; do r0 now
+  lwz    r5,28(r3)  ; do r5 now
+  lwz    r4,24(r3)  ; do r4 now
+  lwz    r1,12(r3)  ; do sp now
+  lwz    r3,20(r3)  ; do r3 last
+  bctr
 
 #elif __arm64__
 
+  .text
+  .globl __ZN9libunwind15Registers_arm646jumptoEv
+  .private_extern __ZN9libunwind15Registers_arm646jumptoEv
+__ZN9libunwind15Registers_arm646jumptoEv:
 ;
-; extern int unw_getcontext(unw_context_t* thread_state)
+; void libunwind::Registers_arm64::jumpto()
 ;
 ; On entry:
 ;  thread_state pointer is in x0
 ;
-  .globl _unw_getcontext
-_unw_getcontext:
-  stp    x0, x1,  [x0, #0x000]
-  stp    x2, x3,  [x0, #0x010]
-  stp    x4, x5,  [x0, #0x020]
-  stp    x6, x7,  [x0, #0x030]
-  stp    x8, x9,  [x0, #0x040]
-  stp    x10,x11, [x0, #0x050]
-  stp    x12,x13, [x0, #0x060]
-  stp    x14,x15, [x0, #0x070]
-  stp    x16,x17, [x0, #0x080]
-  stp    x18,x19, [x0, #0x090]
-  stp    x20,x21, [x0, #0x0A0]
-  stp    x22,x23, [x0, #0x0B0]
-  stp    x24,x25, [x0, #0x0C0]
-  stp    x26,x27, [x0, #0x0D0]
-  stp    x28,fp,  [x0, #0x0E0]
-  str    lr,      [x0, #0x0F0]
-  mov    x1,sp
-  str    x1,      [x0, #0x0F8]
-  str    lr,      [x0, #0x100]    ; store return address as pc
-  ; skip cpsr
-  stp    d0, d1,  [x0, #0x110]
-  stp    d2, d3,  [x0, #0x120]
-  stp    d4, d5,  [x0, #0x130]
-  stp    d6, d7,  [x0, #0x140]
-  stp    d8, d9,  [x0, #0x150]
-  stp    d10,d11, [x0, #0x160]
-  stp    d12,d13, [x0, #0x170]
-  stp    d14,d15, [x0, #0x180]
-  stp    d16,d17, [x0, #0x190]
-  stp    d18,d19, [x0, #0x1A0]
-  stp    d20,d21, [x0, #0x1B0]
-  stp    d22,d23, [x0, #0x1C0]
-  stp    d24,d25, [x0, #0x1D0]
-  stp    d26,d27, [x0, #0x1E0]
-  stp    d28,d29, [x0, #0x1F0]
-  str    d30,     [x0, #0x200]
-  str    d31,     [x0, #0x208]
-  mov    x0, #0      ; return UNW_ESUCCESS
-  ret
+  ; skip restore of x0,x1 for now
+  ldp    x2, x3,  [x0, #0x010]
+  ldp    x4, x5,  [x0, #0x020]
+  ldp    x6, x7,  [x0, #0x030]
+  ldp    x8, x9,  [x0, #0x040]
+  ldp    x10,x11, [x0, #0x050]
+  ldp    x12,x13, [x0, #0x060]
+  ldp    x14,x15, [x0, #0x070]
+  ldp    x16,x17, [x0, #0x080]
+  ldp    x18,x19, [x0, #0x090]
+  ldp    x20,x21, [x0, #0x0A0]
+  ldp    x22,x23, [x0, #0x0B0]
+  ldp    x24,x25, [x0, #0x0C0]
+  ldp    x26,x27, [x0, #0x0D0]
+  ldp    x28,fp,  [x0, #0x0E0]
+  ldr    lr,      [x0, #0x100]  ; restore pc into lr
+  ldr    x1,      [x0, #0x0F8]
+  mov    sp,x1          ; restore sp
+
+  ldp    d0, d1,  [x0, #0x110]
+  ldp    d2, d3,  [x0, #0x120]
+  ldp    d4, d5,  [x0, #0x130]
+  ldp    d6, d7,  [x0, #0x140]
+  ldp    d8, d9,  [x0, #0x150]
+  ldp    d10,d11, [x0, #0x160]
+  ldp    d12,d13, [x0, #0x170]
+  ldp    d14,d15, [x0, #0x180]
+  ldp    d16,d17, [x0, #0x190]
+  ldp    d18,d19, [x0, #0x1A0]
+  ldp    d20,d21, [x0, #0x1B0]
+  ldp    d22,d23, [x0, #0x1C0]
+  ldp    d24,d25, [x0, #0x1D0]
+  ldp    d26,d27, [x0, #0x1E0]
+  ldp    d28,d29, [x0, #0x1F0]
+  ldr    d30,     [x0, #0x200]
+  ldr    d31,     [x0, #0x208]
+
+  ldp    x0, x1,  [x0, #0x000]  ; restore x0,x1
+  ret    lr            ; jump to pc
+
+
+
 
 #endif
 

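An aside on the magic numbers above: the movq/lwz/ldp displacements encode
the field layout of the per-architecture register state. Here is a hedged
C illustration of the x86_64 layout implied by the offsets in the code
(the struct name and field spelling are illustrative, not the tree's
actual Registers_x86_64 declaration):

  #include <stdint.h>
  #include <stddef.h>

  struct x86_64_regs {              /* hypothetical mirror of the layout */
    uint64_t rax, rbx, rcx, rdx;    /* offsets   0,   8,  16,  24 */
    uint64_t rdi, rsi, rbp, rsp;    /* offsets  32,  40,  48,  56 */
    uint64_t r8,  r9,  r10, r11;    /* offsets  64,  72,  80,  88 */
    uint64_t r12, r13, r14, r15;    /* offsets  96, 104, 112, 120 */
    uint64_t rip;                   /* offset  128                */
  };

  /* C11 compile-time checks that the sketch matches the assembly offsets. */
  _Static_assert(offsetof(struct x86_64_regs, rsp) == 56,  "rsp offset");
  _Static_assert(offsetof(struct x86_64_regs, rip) == 128, "rip offset");
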
Modified: libcxxabi/trunk/src/Unwind/UnwindRegistersSave.s
URL: http://llvm.org/viewvc/llvm-project/libcxxabi/trunk/src/Unwind/UnwindRegistersSave.s?rev=193840&r1=193839&r2=193840&view=diff
==============================================================================
--- libcxxabi/trunk/src/Unwind/UnwindRegistersSave.s (original)
+++ libcxxabi/trunk/src/Unwind/UnwindRegistersSave.s Thu Oct 31 19:37:20 2013
@@ -8,316 +8,279 @@
 //===----------------------------------------------------------------------===//
 
 
+    .text
+
 #if __i386__
-  .text
-  .globl __ZN9libunwind13Registers_x866jumptoEv
-  .private_extern __ZN9libunwind13Registers_x866jumptoEv
-__ZN9libunwind13Registers_x866jumptoEv:
+
 #
-# void libunwind::Registers_x86::jumpto()
+# extern int unw_getcontext(unw_context_t* thread_state)
 #
 # On entry:
-#  +                       +
-#  +-----------------------+
-#  + thread_state pointer  +
-#  +-----------------------+
-#  + return address        +
-#  +-----------------------+   <-- SP
-#  +                       +
-  movl   4(%esp), %eax
-  # set up eax and ret on new stack location
-  movl  28(%eax), %edx # edx holds new stack pointer
-  subl  $8,%edx
-  movl  %edx, 28(%eax)
-  movl  0(%eax), %ebx
-  movl  %ebx, 0(%edx)
-  movl  40(%eax), %ebx
-  movl  %ebx, 4(%edx)
-  # we now have ret and eax pushed onto where new stack will be
-  # restore all registers
-  movl   4(%eax), %ebx
-  movl   8(%eax), %ecx
-  movl  12(%eax), %edx
-  movl  16(%eax), %edi
-  movl  20(%eax), %esi
-  movl  24(%eax), %ebp
-  movl  28(%eax), %esp
+#   +                       +
+#   +-----------------------+
+#   + thread_state pointer  +
+#   +-----------------------+
+#   + return address        +
+#   +-----------------------+   <-- SP
+#   +                       +
+#
+  .globl _unw_getcontext
+_unw_getcontext:
+  push  %eax
+  movl  8(%esp), %eax
+  movl  %ebx,  4(%eax)
+  movl  %ecx,  8(%eax)
+  movl  %edx, 12(%eax)
+  movl  %edi, 16(%eax)
+  movl  %esi, 20(%eax)
+  movl  %ebp, 24(%eax)
+  movl  %esp, %edx
+  addl  $8, %edx
+  movl  %edx, 28(%eax)  # store what sp was at call site as esp
   # skip ss
   # skip eflags
-  pop    %eax  # eax was already pushed on new stack
-  ret        # eip was already pushed on new stack
+  movl  4(%esp), %edx
+  movl  %edx, 40(%eax)  # store return address as eip
   # skip cs
   # skip ds
   # skip es
   # skip fs
   # skip gs
+  movl  (%esp), %edx
+  movl  %edx, (%eax)  # store original eax
+  popl  %eax
+  xorl  %eax, %eax    # return UNW_ESUCCESS
+  ret
 
 #elif __x86_64__
 
-  .text
-  .globl __ZN9libunwind16Registers_x86_646jumptoEv
-  .private_extern __ZN9libunwind16Registers_x86_646jumptoEv
-__ZN9libunwind16Registers_x86_646jumptoEv:
 #
-# void libunwind::Registers_x86_64::jumpto()
+# extern int unw_getcontext(unw_context_t* thread_state)
 #
-# On entry, thread_state pointer is in rdi
-
-  movq  56(%rdi), %rax # rax holds new stack pointer
-  subq  $16, %rax
-  movq  %rax, 56(%rdi)
-  movq  32(%rdi), %rbx  # store new rdi on new stack
-  movq  %rbx, 0(%rax)
-  movq  128(%rdi), %rbx # store new rip on new stack
-  movq  %rbx, 8(%rax)
-  # restore all registers
-  movq    0(%rdi), %rax
-  movq    8(%rdi), %rbx
-  movq   16(%rdi), %rcx
-  movq   24(%rdi), %rdx
-  # restore rdi later
-  movq   40(%rdi), %rsi
-  movq   48(%rdi), %rbp
-  # restore rsp later
-  movq   64(%rdi), %r8
-  movq   72(%rdi), %r9
-  movq   80(%rdi), %r10
-  movq   88(%rdi), %r11
-  movq   96(%rdi), %r12
-  movq  104(%rdi), %r13
-  movq  112(%rdi), %r14
-  movq  120(%rdi), %r15
+# On entry:
+#  thread_state pointer is in rdi
+#
+  .globl _unw_getcontext
+_unw_getcontext:
+  movq  %rax,   (%rdi)
+  movq  %rbx,  8(%rdi)
+  movq  %rcx, 16(%rdi)
+  movq  %rdx, 24(%rdi)
+  movq  %rdi, 32(%rdi)
+  movq  %rsi, 40(%rdi)
+  movq  %rbp, 48(%rdi)
+  movq  %rsp, 56(%rdi)
+  addq  $8,   56(%rdi)
+  movq  %r8,  64(%rdi)
+  movq  %r9,  72(%rdi)
+  movq  %r10, 80(%rdi)
+  movq  %r11, 88(%rdi)
+  movq  %r12, 96(%rdi)
+  movq  %r13,104(%rdi)
+  movq  %r14,112(%rdi)
+  movq  %r15,120(%rdi)
+  movq  (%rsp),%rsi
+  movq  %rsi,128(%rdi) # store return address as rip
   # skip rflags
   # skip cs
   # skip fs
   # skip gs
-  movq  56(%rdi), %rsp  # cut back rsp to new location
-  pop    %rdi      # rdi was saved here earlier
-  ret            # rip was saved here
-
+  xorl  %eax, %eax    # return UNW_ESUCCESS
+  ret
 
 #elif __ppc__
 
-  .text
-  .globl __ZN9libunwind13Registers_ppc6jumptoEv
-  .private_extern __ZN9libunwind13Registers_ppc6jumptoEv
-__ZN9libunwind13Registers_ppc6jumptoEv:
 ;
-; void libunwind::Registers_ppc::jumpto()
+; extern int unw_getcontext(unw_context_t* thread_state)
 ;
 ; On entry:
 ;  thread_state pointer is in r3
 ;
+  .globl _unw_getcontext
+_unw_getcontext:
+  stw    r0,  8(r3)
+  mflr  r0
+  stw    r0,  0(r3)  ; store lr as ssr0
+  stw    r1, 12(r3)
+  stw    r2, 16(r3)
+  stw    r3, 20(r3)
+  stw    r4, 24(r3)
+  stw    r5, 28(r3)
+  stw    r6, 32(r3)
+  stw    r7, 36(r3)
+  stw    r8, 40(r3)
+  stw    r9, 44(r3)
+  stw     r10, 48(r3)
+  stw     r11, 52(r3)
+  stw     r12, 56(r3)
+  stw     r13, 60(r3)
+  stw     r14, 64(r3)
+  stw     r15, 68(r3)
+  stw     r16, 72(r3)
+  stw     r17, 76(r3)
+  stw     r18, 80(r3)
+  stw     r19, 84(r3)
+  stw     r20, 88(r3)
+  stw     r21, 92(r3)
+  stw     r22, 96(r3)
+  stw     r23,100(r3)
+  stw     r24,104(r3)
+  stw     r25,108(r3)
+  stw     r26,112(r3)
+  stw     r27,116(r3)
+  stw     r28,120(r3)
+  stw     r29,124(r3)
+  stw     r30,128(r3)
+  stw     r31,132(r3)
+
+  ; save VRSave register
+  mfspr  r0,256
+  stw    r0,156(r3)
+  ; save CR registers
+  mfcr  r0
+  stw    r0,136(r3)
+  ; save CTR register
+  mfctr  r0
+  stw    r0,148(r3)
+
+  ; save float registers
+  stfd    f0, 160(r3)
+  stfd    f1, 168(r3)
+  stfd    f2, 176(r3)
+  stfd    f3, 184(r3)
+  stfd    f4, 192(r3)
+  stfd    f5, 200(r3)
+  stfd    f6, 208(r3)
+  stfd    f7, 216(r3)
+  stfd    f8, 224(r3)
+  stfd    f9, 232(r3)
+  stfd    f10,240(r3)
+  stfd    f11,248(r3)
+  stfd    f12,256(r3)
+  stfd    f13,264(r3)
+  stfd    f14,272(r3)
+  stfd    f15,280(r3)
+  stfd    f16,288(r3)
+  stfd    f17,296(r3)
+  stfd    f18,304(r3)
+  stfd    f19,312(r3)
+  stfd    f20,320(r3)
+  stfd    f21,328(r3)
+  stfd    f22,336(r3)
+  stfd    f23,344(r3)
+  stfd    f24,352(r3)
+  stfd    f25,360(r3)
+  stfd    f26,368(r3)
+  stfd    f27,376(r3)
+  stfd    f28,384(r3)
+  stfd    f29,392(r3)
+  stfd    f30,400(r3)
+  stfd    f31,408(r3)
 
-  ; restore integral registers
-  ; skip r0 for now
-  ; skip r1 for now
-  lwz     r2, 16(r3)
-  ; skip r3 for now
-  ; skip r4 for now
-  ; skip r5 for now
-  lwz     r6, 32(r3)
-  lwz     r7, 36(r3)
-  lwz     r8, 40(r3)
-  lwz     r9, 44(r3)
-  lwz    r10, 48(r3)
-  lwz    r11, 52(r3)
-  lwz    r12, 56(r3)
-  lwz    r13, 60(r3)
-  lwz    r14, 64(r3)
-  lwz    r15, 68(r3)
-  lwz    r16, 72(r3)
-  lwz    r17, 76(r3)
-  lwz    r18, 80(r3)
-  lwz    r19, 84(r3)
-  lwz    r20, 88(r3)
-  lwz    r21, 92(r3)
-  lwz    r22, 96(r3)
-  lwz    r23,100(r3)
-  lwz    r24,104(r3)
-  lwz    r25,108(r3)
-  lwz    r26,112(r3)
-  lwz    r27,116(r3)
-  lwz    r28,120(r3)
-  lwz    r29,124(r3)
-  lwz    r30,128(r3)
-  lwz    r31,132(r3)
-
-  ; restore float registers
-  lfd    f0, 160(r3)
-  lfd    f1, 168(r3)
-  lfd    f2, 176(r3)
-  lfd    f3, 184(r3)
-  lfd    f4, 192(r3)
-  lfd    f5, 200(r3)
-  lfd    f6, 208(r3)
-  lfd    f7, 216(r3)
-  lfd    f8, 224(r3)
-  lfd    f9, 232(r3)
-  lfd    f10,240(r3)
-  lfd    f11,248(r3)
-  lfd    f12,256(r3)
-  lfd    f13,264(r3)
-  lfd    f14,272(r3)
-  lfd    f15,280(r3)
-  lfd    f16,288(r3)
-  lfd    f17,296(r3)
-  lfd    f18,304(r3)
-  lfd    f19,312(r3)
-  lfd    f20,320(r3)
-  lfd    f21,328(r3)
-  lfd    f22,336(r3)
-  lfd    f23,344(r3)
-  lfd    f24,352(r3)
-  lfd    f25,360(r3)
-  lfd    f26,368(r3)
-  lfd    f27,376(r3)
-  lfd    f28,384(r3)
-  lfd    f29,392(r3)
-  lfd    f30,400(r3)
-  lfd    f31,408(r3)
-
-  ; restore vector registers if any are in use
-  lwz    r5,156(r3)  ; test VRSave
-  cmpwi  r5,0
-  beq    Lnovec
+
+  ; save vector registers
 
   subi  r4,r1,16
   rlwinm  r4,r4,0,0,27  ; mask low 4-bits
   ; r4 is now a 16-byte aligned pointer into the red zone
-  ; the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
 
+#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
+  stvx  _vec,0,r4           @\
+  lwz    r5, 0(r4)          @\
+  stw    r5, _offset(r3)    @\
+  lwz    r5, 4(r4)          @\
+  stw    r5, _offset+4(r3)  @\
+  lwz    r5, 8(r4)          @\
+  stw    r5, _offset+8(r3)  @\
+  lwz    r5, 12(r4)         @\
+  stw    r5, _offset+12(r3)
+
+  SAVE_VECTOR_UNALIGNED( v0, 424+0x000)
+  SAVE_VECTOR_UNALIGNED( v1, 424+0x010)
+  SAVE_VECTOR_UNALIGNED( v2, 424+0x020)
+  SAVE_VECTOR_UNALIGNED( v3, 424+0x030)
+  SAVE_VECTOR_UNALIGNED( v4, 424+0x040)
+  SAVE_VECTOR_UNALIGNED( v5, 424+0x050)
+  SAVE_VECTOR_UNALIGNED( v6, 424+0x060)
+  SAVE_VECTOR_UNALIGNED( v7, 424+0x070)
+  SAVE_VECTOR_UNALIGNED( v8, 424+0x080)
+  SAVE_VECTOR_UNALIGNED( v9, 424+0x090)
+  SAVE_VECTOR_UNALIGNED(v10, 424+0x0A0)
+  SAVE_VECTOR_UNALIGNED(v11, 424+0x0B0)
+  SAVE_VECTOR_UNALIGNED(v12, 424+0x0C0)
+  SAVE_VECTOR_UNALIGNED(v13, 424+0x0D0)
+  SAVE_VECTOR_UNALIGNED(v14, 424+0x0E0)
+  SAVE_VECTOR_UNALIGNED(v15, 424+0x0F0)
+  SAVE_VECTOR_UNALIGNED(v16, 424+0x100)
+  SAVE_VECTOR_UNALIGNED(v17, 424+0x110)
+  SAVE_VECTOR_UNALIGNED(v18, 424+0x120)
+  SAVE_VECTOR_UNALIGNED(v19, 424+0x130)
+  SAVE_VECTOR_UNALIGNED(v20, 424+0x140)
+  SAVE_VECTOR_UNALIGNED(v21, 424+0x150)
+  SAVE_VECTOR_UNALIGNED(v22, 424+0x160)
+  SAVE_VECTOR_UNALIGNED(v23, 424+0x170)
+  SAVE_VECTOR_UNALIGNED(v24, 424+0x180)
+  SAVE_VECTOR_UNALIGNED(v25, 424+0x190)
+  SAVE_VECTOR_UNALIGNED(v26, 424+0x1A0)
+  SAVE_VECTOR_UNALIGNED(v27, 424+0x1B0)
+  SAVE_VECTOR_UNALIGNED(v28, 424+0x1C0)
+  SAVE_VECTOR_UNALIGNED(v29, 424+0x1D0)
+  SAVE_VECTOR_UNALIGNED(v30, 424+0x1E0)
+  SAVE_VECTOR_UNALIGNED(v31, 424+0x1F0)
+
+  li  r3, 0    ; return UNW_ESUCCESS
+  blr
 
-#define LOAD_VECTOR_UNALIGNEDl(_index) \
-  andis.  r0,r5,(1<<(15-_index))  @\
-  beq    Ldone  ## _index     @\
-  lwz    r0, 424+_index*16(r3)  @\
-  stw    r0, 0(r4)        @\
-  lwz    r0, 424+_index*16+4(r3)  @\
-  stw    r0, 4(r4)        @\
-  lwz    r0, 424+_index*16+8(r3)  @\
-  stw    r0, 8(r4)        @\
-  lwz    r0, 424+_index*16+12(r3)@\
-  stw    r0, 12(r4)        @\
-  lvx    v ## _index,0,r4    @\
-Ldone  ## _index:
-
-#define LOAD_VECTOR_UNALIGNEDh(_index) \
-  andi.  r0,r5,(1<<(31-_index))  @\
-  beq    Ldone  ## _index    @\
-  lwz    r0, 424+_index*16(r3)  @\
-  stw    r0, 0(r4)        @\
-  lwz    r0, 424+_index*16+4(r3)  @\
-  stw    r0, 4(r4)        @\
-  lwz    r0, 424+_index*16+8(r3)  @\
-  stw    r0, 8(r4)        @\
-  lwz    r0, 424+_index*16+12(r3)@\
-  stw    r0, 12(r4)        @\
-  lvx    v ## _index,0,r4    @\
-  Ldone  ## _index:
-
-
-  LOAD_VECTOR_UNALIGNEDl(0)
-  LOAD_VECTOR_UNALIGNEDl(1)
-  LOAD_VECTOR_UNALIGNEDl(2)
-  LOAD_VECTOR_UNALIGNEDl(3)
-  LOAD_VECTOR_UNALIGNEDl(4)
-  LOAD_VECTOR_UNALIGNEDl(5)
-  LOAD_VECTOR_UNALIGNEDl(6)
-  LOAD_VECTOR_UNALIGNEDl(7)
-  LOAD_VECTOR_UNALIGNEDl(8)
-  LOAD_VECTOR_UNALIGNEDl(9)
-  LOAD_VECTOR_UNALIGNEDl(10)
-  LOAD_VECTOR_UNALIGNEDl(11)
-  LOAD_VECTOR_UNALIGNEDl(12)
-  LOAD_VECTOR_UNALIGNEDl(13)
-  LOAD_VECTOR_UNALIGNEDl(14)
-  LOAD_VECTOR_UNALIGNEDl(15)
-  LOAD_VECTOR_UNALIGNEDh(16)
-  LOAD_VECTOR_UNALIGNEDh(17)
-  LOAD_VECTOR_UNALIGNEDh(18)
-  LOAD_VECTOR_UNALIGNEDh(19)
-  LOAD_VECTOR_UNALIGNEDh(20)
-  LOAD_VECTOR_UNALIGNEDh(21)
-  LOAD_VECTOR_UNALIGNEDh(22)
-  LOAD_VECTOR_UNALIGNEDh(23)
-  LOAD_VECTOR_UNALIGNEDh(24)
-  LOAD_VECTOR_UNALIGNEDh(25)
-  LOAD_VECTOR_UNALIGNEDh(26)
-  LOAD_VECTOR_UNALIGNEDh(27)
-  LOAD_VECTOR_UNALIGNEDh(28)
-  LOAD_VECTOR_UNALIGNEDh(29)
-  LOAD_VECTOR_UNALIGNEDh(30)
-  LOAD_VECTOR_UNALIGNEDh(31)
-
-Lnovec:
-  lwz    r0, 136(r3) ; __cr
-  mtocrf  255,r0
-  lwz    r0, 148(r3) ; __ctr
-  mtctr  r0
-  lwz    r0, 0(r3)  ; __ssr0
-  mtctr  r0
-  lwz    r0, 8(r3)  ; do r0 now
-  lwz    r5,28(r3)  ; do r5 now
-  lwz    r4,24(r3)  ; do r4 now
-  lwz    r1,12(r3)  ; do sp now
-  lwz    r3,20(r3)  ; do r3 last
-  bctr
 
 #elif __arm64__
 
-  .text
-  .globl __ZN9libunwind15Registers_arm646jumptoEv
-  .private_extern __ZN9libunwind15Registers_arm646jumptoEv
-__ZN9libunwind15Registers_arm646jumptoEv:
 ;
-; void libunwind::Registers_arm64::jumpto()
+; extern int unw_getcontext(unw_context_t* thread_state)
 ;
 ; On entry:
 ;  thread_state pointer is in x0
 ;
-  ; skip restore of x0,x1 for now
-  ldp    x2, x3,  [x0, #0x010]
-  ldp    x4, x5,  [x0, #0x020]
-  ldp    x6, x7,  [x0, #0x030]
-  ldp    x8, x9,  [x0, #0x040]
-  ldp    x10,x11, [x0, #0x050]
-  ldp    x12,x13, [x0, #0x060]
-  ldp    x14,x15, [x0, #0x070]
-  ldp    x16,x17, [x0, #0x080]
-  ldp    x18,x19, [x0, #0x090]
-  ldp    x20,x21, [x0, #0x0A0]
-  ldp    x22,x23, [x0, #0x0B0]
-  ldp    x24,x25, [x0, #0x0C0]
-  ldp    x26,x27, [x0, #0x0D0]
-  ldp    x28,fp,  [x0, #0x0E0]
-  ldr    lr,      [x0, #0x100]  ; restore pc into lr
-  ldr    x1,      [x0, #0x0F8]
-  mov    sp,x1          ; restore sp
-
-  ldp    d0, d1,  [x0, #0x110]
-  ldp    d2, d3,  [x0, #0x120]
-  ldp    d4, d5,  [x0, #0x130]
-  ldp    d6, d7,  [x0, #0x140]
-  ldp    d8, d9,  [x0, #0x150]
-  ldp    d10,d11, [x0, #0x160]
-  ldp    d12,d13, [x0, #0x170]
-  ldp    d14,d15, [x0, #0x180]
-  ldp    d16,d17, [x0, #0x190]
-  ldp    d18,d19, [x0, #0x1A0]
-  ldp    d20,d21, [x0, #0x1B0]
-  ldp    d22,d23, [x0, #0x1C0]
-  ldp    d24,d25, [x0, #0x1D0]
-  ldp    d26,d27, [x0, #0x1E0]
-  ldp    d28,d29, [x0, #0x1F0]
-  ldr    d30,     [x0, #0x200]
-  ldr    d31,     [x0, #0x208]
-
-  ldp    x0, x1,  [x0, #0x000]  ; restore x0,x1
-  ret    lr            ; jump to pc
-
-
-
+  .globl _unw_getcontext
+_unw_getcontext:
+  stp    x0, x1,  [x0, #0x000]
+  stp    x2, x3,  [x0, #0x010]
+  stp    x4, x5,  [x0, #0x020]
+  stp    x6, x7,  [x0, #0x030]
+  stp    x8, x9,  [x0, #0x040]
+  stp    x10,x11, [x0, #0x050]
+  stp    x12,x13, [x0, #0x060]
+  stp    x14,x15, [x0, #0x070]
+  stp    x16,x17, [x0, #0x080]
+  stp    x18,x19, [x0, #0x090]
+  stp    x20,x21, [x0, #0x0A0]
+  stp    x22,x23, [x0, #0x0B0]
+  stp    x24,x25, [x0, #0x0C0]
+  stp    x26,x27, [x0, #0x0D0]
+  stp    x28,fp,  [x0, #0x0E0]
+  str    lr,      [x0, #0x0F0]
+  mov    x1,sp
+  str    x1,      [x0, #0x0F8]
+  str    lr,      [x0, #0x100]    ; store return address as pc
+  ; skip cpsr
+  stp    d0, d1,  [x0, #0x110]
+  stp    d2, d3,  [x0, #0x120]
+  stp    d4, d5,  [x0, #0x130]
+  stp    d6, d7,  [x0, #0x140]
+  stp    d8, d9,  [x0, #0x150]
+  stp    d10,d11, [x0, #0x160]
+  stp    d12,d13, [x0, #0x170]
+  stp    d14,d15, [x0, #0x180]
+  stp    d16,d17, [x0, #0x190]
+  stp    d18,d19, [x0, #0x1A0]
+  stp    d20,d21, [x0, #0x1B0]
+  stp    d22,d23, [x0, #0x1C0]
+  stp    d24,d25, [x0, #0x1D0]
+  stp    d26,d27, [x0, #0x1E0]
+  stp    d28,d29, [x0, #0x1F0]
+  str    d30,     [x0, #0x200]
+  str    d31,     [x0, #0x208]
+  mov    x0, #0      ; return UNW_ESUCCESS
+  ret
 
 #endif
 
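The restore half is normally reached through unw_resume(), which ends up
in the Registers_*::jumpto() code from UnwindRegistersRestore.s above. A
hedged sketch of the pairing (the function name is mine; step return
values unchecked; unw_resume() does not return on success):

  #include <libunwind.h>

  static void resume_two_frames_up(void) {
    unw_context_t ctx;
    unw_cursor_t cursor;

    unw_getcontext(&ctx);            /* save (UnwindRegistersSave.s) */
    unw_init_local(&cursor, &ctx);
    unw_step(&cursor);               /* step to our caller */
    unw_step(&cursor);               /* and to its caller */
    unw_resume(&cursor);             /* restore registers and jump
                                        (UnwindRegistersRestore.s) */
  }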
