//===-------------------- UnwindRegistersRestore.S ------------------------===//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
#if !defined(__USING_SJLJ_EXCEPTIONS__)
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto)
# extern "C" void __libunwind_Registers_x86_jumpto(Registers_x86 *);
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  +-----------------------+ <-- SP
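# Overview of the approach used below: the saved eip and eax are written just
# below the new stack pointer taken from the context, every other register is
# restored directly from the context, and esp is then switched to the new
# stack so a final pop/ret completes the transfer of control.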
  # set up eax and ret on new stack location
  movl 28(%eax), %edx # edx holds new stack pointer
  # ret and eax have now been pushed to where the new stack will be
  # restore all registers
  pop %eax # eax was already pushed on new stack
  ret      # eip was already pushed on new stack
#elif defined(__x86_64__)
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_64_jumpto)
# extern "C" void __libunwind_Registers_x86_64_jumpto(Registers_x86_64 *);
# On entry, thread_state pointer is in rcx; move it into rdi
# to share restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on unix even though win64 normally
# mustn't clobber some of them.
# On entry, thread_state pointer is in rdi
  movq 56(%rdi), %rax  # rax holds new stack pointer
  movq 32(%rdi), %rbx  # store new rdi on new stack
  movq 128(%rdi), %rbx # store new rip on new stack
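  # These offsets are 8 * (register index) into the saved GPR array of
  # Registers_x86_64: 32 = rdi, 56 = rsp, 128 = rip, matching the comments on
  # the loads above (layout assumption; see Registers.hpp for the definition).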
  # restore all registers
  movdqu 176(%rdi),%xmm0
  movdqu 192(%rdi),%xmm1
  movdqu 208(%rdi),%xmm2
  movdqu 224(%rdi),%xmm3
  movdqu 240(%rdi),%xmm4
  movdqu 256(%rdi),%xmm5
  movdqu 272(%rdi),%xmm6
  movdqu 288(%rdi),%xmm7
  movdqu 304(%rdi),%xmm8
  movdqu 320(%rdi),%xmm9
  movdqu 336(%rdi),%xmm10
  movdqu 352(%rdi),%xmm11
  movdqu 368(%rdi),%xmm12
  movdqu 384(%rdi),%xmm13
  movdqu 400(%rdi),%xmm14
  movdqu 416(%rdi),%xmm15
  movq 56(%rdi), %rsp # cut back rsp to new location
  pop %rdi            # rdi was saved here earlier
  ret                 # rip was saved here
#elif defined(__powerpc64__)
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
// void libunwind::Registers_ppc64::jumpto()
// thread_state pointer is in r3
// load register (GPR)
#define PPC64_LR(n) \
  ld %r##n, (8 * (n + 2))(%r3)
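// For example, PPC64_LR(30) expands to
//   ld %r30, 256(%r3)   // 8 * (30 + 2) == 256
// i.e. GPR n is read from an 8-byte slot that sits two 8-byte fields past the
// start of the context pointed to by r3.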
  // restore integral registers
  // restore VS registers
  // (note that this also restores floating point registers and V registers,
  // because part of VS is mapped to these registers)
  addi %r4, %r3, PPC64_OFFS_FP
#define PPC64_LVS(n) \
  lxvd2x %vs##n, 0, %r4 ;\
  // restore the first 32 VS regs (and also all floating point regs)
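  // lxvd2x loads 16 bytes (two doublewords) into the named VSX register from
  // the effective address 0 + r4, so each PPC64_LVS(n) use reloads vs(n) from
  // the save area that r4 points into (r4 was set to r3 + PPC64_OFFS_FP above).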
  // use VRSAVE to conditionally restore the remaining VS regs, which are
  // where the V regs are mapped
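  // VRSAVE is a 32-bit mask in which, by convention, bit i (counted from the
  // most significant bit) is set when vector register v(i) is in use; each
  // conditional-load macro below tests the bit for its register and skips the
  // load when the bit is clear.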
  ld %r5, PPC64_OFFS_VRSAVE(%r3) // test VRsave
  // conditionally load VS
#define PPC64_CLVS_BOTTOM(n) \
  addi %r4, %r3, PPC64_OFFS_FP + n * 16 ;\
  lxvd2x %vs##n, 0, %r4 ;\
#define PPC64_CLVSl(n) \
  andis. %r0, %r5, (1<<(47-n)) ;\
#define PPC64_CLVSh(n) \
  andi. %r0, %r5, (1<<(63-n)) ;\
#define PPC64_LF(n) \
  lfd %f##n, (PPC64_OFFS_FP + n * 16)(%r3)
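// For example, PPC64_LF(31) expands to
//   lfd %f31, (PPC64_OFFS_FP + 496)(%r3)   // 31 * 16 == 496
// The 16-byte stride matches the VS save slots above, since the FP registers
// occupy part of the VS register file.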
  // restore float registers
#if defined(__ALTIVEC__)
  // restore vector registers if any are in use
  ld %r5, PPC64_OFFS_VRSAVE(%r3) // test VRsave
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorScalarRegisters may not be 16-byte aligned
  // so copy via red zone temp buffer
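  // The red zone is the ABI-guaranteed scratch area below the stack pointer
  // that may be used without allocating a frame; each live vector is copied
  // doubleword by doubleword from the (possibly unaligned) context into this
  // aligned buffer and then loaded from there.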
#define PPC64_CLV_UNALIGNED_BOTTOM(n) \
  ld %r0, (PPC64_OFFS_V + n * 16)(%r3) ;\
  ld %r0, (PPC64_OFFS_V + n * 16 + 8)(%r3) ;\
#define PPC64_CLV_UNALIGNEDl(n) \
  andis. %r0, %r5, (1<<(15-n)) ;\
  PPC64_CLV_UNALIGNED_BOTTOM(n)
#define PPC64_CLV_UNALIGNEDh(n) \
  andi. %r0, %r5, (1<<(31-n)) ;\
  PPC64_CLV_UNALIGNED_BOTTOM(n)
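// For example, PPC64_CLV_UNALIGNEDl(0) tests VRSAVE with
//   andis. %r0, %r5, 0x8000   // (1 << (15 - 0)), the v0 bit
// and, when the bit is set, copies the 16 saved bytes of v0 starting at
// PPC64_OFFS_V(%r3) through the aligned red-zone buffer before reloading it.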
  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)
  ld %r0, PPC64_OFFS_CR(%r3)
  ld %r0, PPC64_OFFS_SRR0(%r3)
#elif defined(__ppc__)
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
// void libunwind::Registers_ppc::jumpto()
// thread_state pointer is in r3
  // restore integral registers
  // restore float registers
#if defined(__ALTIVEC__)
  // restore vector registers if any are in use
  lwz %r5, 156(%r3) // test VRsave
  rlwinm %r4, %r4, 0, 0, 27 // clear low 4 bits to 16-byte align
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
#define LOAD_VECTOR_UNALIGNEDl(_index) \
  andis. %r0, %r5, (1<<(15-_index)) SEPARATOR \
  beq Ldone ## _index SEPARATOR \
  lwz %r0, 424+_index*16(%r3) SEPARATOR \
  stw %r0, 0(%r4) SEPARATOR \
  lwz %r0, 424+_index*16+4(%r3) SEPARATOR \
  stw %r0, 4(%r4) SEPARATOR \
  lwz %r0, 424+_index*16+8(%r3) SEPARATOR \
  stw %r0, 8(%r4) SEPARATOR \
  lwz %r0, 424+_index*16+12(%r3) SEPARATOR \
  stw %r0, 12(%r4) SEPARATOR \
  lvx %v ## _index, 0, %r4 SEPARATOR \
#define LOAD_VECTOR_UNALIGNEDh(_index) \
  andi. %r0, %r5, (1<<(31-_index)) SEPARATOR \
  beq Ldone ## _index SEPARATOR \
  lwz %r0, 424+_index*16(%r3) SEPARATOR \
  stw %r0, 0(%r4) SEPARATOR \
  lwz %r0, 424+_index*16+4(%r3) SEPARATOR \
  stw %r0, 4(%r4) SEPARATOR \
  lwz %r0, 424+_index*16+8(%r3) SEPARATOR \
  stw %r0, 8(%r4) SEPARATOR \
  lwz %r0, 424+_index*16+12(%r3) SEPARATOR \
  stw %r0, 12(%r4) SEPARATOR \
  lvx %v ## _index, 0, %r4 SEPARATOR \
  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)
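  // For example, LOAD_VECTOR_UNALIGNEDl(0) expands to: test the v0 bit of
  // VRSAVE (andis. with 0x8000), branch to Ldone0 if it is clear, copy the 16
  // bytes saved at 424(%r3) word by word into the aligned red-zone buffer at
  // r4, and finally lvx %v0, 0, %r4 to load the vector register.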
  lwz %r0, 136(%r3) // __cr
  lwz %r0, 148(%r3) // __ctr
  lwz %r0, 0(%r3)   // __srr0
  lwz %r0, 8(%r3)   // do r0 now
  lwz %r5, 28(%r3)  // do r5 now
  lwz %r4, 24(%r3)  // do r4 now
  lwz %r1, 12(%r3)  // do sp now
  lwz %r3, 20(%r3)  // do r3 last
#elif defined(__aarch64__)
// extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *);
// thread_state pointer is in x0
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto)
  // skip restore of x0,x1 for now
  ldp x2, x3, [x0, #0x010]
  ldp x4, x5, [x0, #0x020]
  ldp x6, x7, [x0, #0x030]
  ldp x8, x9, [x0, #0x040]
  ldp x10,x11, [x0, #0x050]
  ldp x12,x13, [x0, #0x060]
  ldp x14,x15, [x0, #0x070]
  // x16 and x17 were clobbered by the call into the unwinder, so no point in
  // restoring them.
  ldp x18,x19, [x0, #0x090]
  ldp x20,x21, [x0, #0x0A0]
  ldp x22,x23, [x0, #0x0B0]
  ldp x24,x25, [x0, #0x0C0]
  ldp x26,x27, [x0, #0x0D0]
  ldp x28,x29, [x0, #0x0E0]
  ldr x30, [x0, #0x100] // restore pc into lr
  ldp d0, d1, [x0, #0x110]
  ldp d2, d3, [x0, #0x120]
  ldp d4, d5, [x0, #0x130]
  ldp d6, d7, [x0, #0x140]
  ldp d8, d9, [x0, #0x150]
  ldp d10,d11, [x0, #0x160]
  ldp d12,d13, [x0, #0x170]
  ldp d14,d15, [x0, #0x180]
  ldp d16,d17, [x0, #0x190]
  ldp d18,d19, [x0, #0x1A0]
  ldp d20,d21, [x0, #0x1B0]
  ldp d22,d23, [x0, #0x1C0]
  ldp d24,d25, [x0, #0x1D0]
  ldp d26,d27, [x0, #0x1E0]
  ldp d28,d29, [x0, #0x1F0]
  ldr d30, [x0, #0x200]
  ldr d31, [x0, #0x208]
  // Finally, restore sp. This must be done after the last read from the
  // context struct, because it is allocated on the stack, and an exception
  // could clobber the de-allocated portion of the stack after sp has been
  // restored.
  ldr x16, [x0, #0x0F8]
  ldp x0, x1, [x0, #0x000] // restore x0,x1
  mov sp,x16               // restore sp
  ret x30                  // jump to pc
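  // Note: "ret x30" branches to the address in x30, which at this point holds
  // the saved pc loaded above rather than a normal return address, so this is
  // the instruction that actually resumes execution in the target frame.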
#elif defined(__arm__) && !defined(__APPLE__)
#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@ thread_state pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  mov lr, r3 @ restore pc into lr
  @ Use lr as base so that r0 can be restored.
  @ 32-bit Thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldr lr, [lr, #60] @ restore pc into lr
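  @ Offset 60 is 15 * 4, i.e. the pc slot, assuming the core registers are
  @ saved as a 16-entry word array at the start of the context (layout
  @ assumption; see Registers_arm in Registers.hpp). Loading the pc into lr
  @ works around the restriction that pc and lr cannot both appear in one LDM.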
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@ values pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do
  @ not want the compiler to generate instructions that access those), but this
  @ code is only reached if the personality routine needs these registers. Use
  @ of these registers implies they are actually available on the target, so it
  @ is safe to execute them.
  @ So, generate the instructions using the corresponding coprocessor mnemonics.
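  @ Concretely, the iwMMX restores further down are written as generic
  @ coprocessor loads (ldcl/ldc2 on p1), with the intended wldrd/wldrw forms
  @ noted in the trailing comments, so the file assembles even when the
  @ assembler is not told to accept iwMMX instructions.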
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@ values pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@ values pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
#if defined(__ARM_WMMX)
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@ values pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl p1, cr0, [r0], #8  @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8  @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8  @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8  @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8  @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8  @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8  @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8  @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8  @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8  @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8 @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8 @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8 @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8 @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8 @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8 @ wldrd wR15, [r0], #8
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@ values pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4  @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4  @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4 @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4 @ wldrw wCGR3, [r0], #4
#elif defined(__or1k__)
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
# void libunwind::Registers_or1k::jumpto()
# thread_state pointer is in r3
  # restore integral registers
  # at last, restore r3
  # load new pc into ra
#elif defined(__hexagon__)
# thread_state pointer is in r2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_hexagon6jumptoEv)
# void libunwind::Registers_hexagon::jumpto()
  c4 = r1   // Predicate register
#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
// void libunwind::Registers_mips_o32::jumpto()
// thread state pointer is in a0 ($4)
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
#ifdef __mips_hard_float
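// Note: when o32 is used with 32-bit FP registers (FR=0), the odd-numbered
// registers are the upper halves of the even-numbered ones, so only the even
// registers hold distinct doubles; that is why the first block below restores
// $f0, $f2, ... while the second restores all 32. The 4 * 36 term in the
// offsets skips the 36 words saved ahead of the FP area (an assumption about
// the Registers_mips_o32 layout).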
  ldc1 $f0, (4 * 36 + 8 * 0)($4)
  ldc1 $f2, (4 * 36 + 8 * 2)($4)
  ldc1 $f4, (4 * 36 + 8 * 4)($4)
  ldc1 $f6, (4 * 36 + 8 * 6)($4)
  ldc1 $f8, (4 * 36 + 8 * 8)($4)
  ldc1 $f10, (4 * 36 + 8 * 10)($4)
  ldc1 $f12, (4 * 36 + 8 * 12)($4)
  ldc1 $f14, (4 * 36 + 8 * 14)($4)
  ldc1 $f16, (4 * 36 + 8 * 16)($4)
  ldc1 $f18, (4 * 36 + 8 * 18)($4)
  ldc1 $f20, (4 * 36 + 8 * 20)($4)
  ldc1 $f22, (4 * 36 + 8 * 22)($4)
  ldc1 $f24, (4 * 36 + 8 * 24)($4)
  ldc1 $f26, (4 * 36 + 8 * 26)($4)
  ldc1 $f28, (4 * 36 + 8 * 28)($4)
  ldc1 $f30, (4 * 36 + 8 * 30)($4)
  ldc1 $f0, (4 * 36 + 8 * 0)($4)
  ldc1 $f1, (4 * 36 + 8 * 1)($4)
  ldc1 $f2, (4 * 36 + 8 * 2)($4)
  ldc1 $f3, (4 * 36 + 8 * 3)($4)
  ldc1 $f4, (4 * 36 + 8 * 4)($4)
  ldc1 $f5, (4 * 36 + 8 * 5)($4)
  ldc1 $f6, (4 * 36 + 8 * 6)($4)
  ldc1 $f7, (4 * 36 + 8 * 7)($4)
  ldc1 $f8, (4 * 36 + 8 * 8)($4)
  ldc1 $f9, (4 * 36 + 8 * 9)($4)
  ldc1 $f10, (4 * 36 + 8 * 10)($4)
  ldc1 $f11, (4 * 36 + 8 * 11)($4)
  ldc1 $f12, (4 * 36 + 8 * 12)($4)
  ldc1 $f13, (4 * 36 + 8 * 13)($4)
  ldc1 $f14, (4 * 36 + 8 * 14)($4)
  ldc1 $f15, (4 * 36 + 8 * 15)($4)
  ldc1 $f16, (4 * 36 + 8 * 16)($4)
  ldc1 $f17, (4 * 36 + 8 * 17)($4)
  ldc1 $f18, (4 * 36 + 8 * 18)($4)
  ldc1 $f19, (4 * 36 + 8 * 19)($4)
  ldc1 $f20, (4 * 36 + 8 * 20)($4)
  ldc1 $f21, (4 * 36 + 8 * 21)($4)
  ldc1 $f22, (4 * 36 + 8 * 22)($4)
  ldc1 $f23, (4 * 36 + 8 * 23)($4)
  ldc1 $f24, (4 * 36 + 8 * 24)($4)
  ldc1 $f25, (4 * 36 + 8 * 25)($4)
  ldc1 $f26, (4 * 36 + 8 * 26)($4)
  ldc1 $f27, (4 * 36 + 8 * 27)($4)
  ldc1 $f28, (4 * 36 + 8 * 28)($4)
  ldc1 $f29, (4 * 36 + 8 * 29)($4)
  ldc1 $f30, (4 * 36 + 8 * 30)($4)
  ldc1 $f31, (4 * 36 + 8 * 31)($4)
  // load new pc into ra
  // jump to ra, load a0 in the delay slot
#elif defined(__mips64)
// void libunwind::Registers_mips_newabi::jumpto()
// thread state pointer is in a0 ($4)
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
#ifdef __mips_hard_float
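// In the saved context, the FP registers appear to start at doubleword index
// 35: the loads below use offsets 8 * 35 through 8 * 66, and the pc is read
// from 8 * 32 further down, so indexes 33 and 34 presumably hold hi and lo
// (restored below). This is an inference from the offsets, not a definitive
// statement of the Registers_mips_newabi layout.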
  ldc1 $f0, (8 * 35)($4)
  ldc1 $f1, (8 * 36)($4)
  ldc1 $f2, (8 * 37)($4)
  ldc1 $f3, (8 * 38)($4)
  ldc1 $f4, (8 * 39)($4)
  ldc1 $f5, (8 * 40)($4)
  ldc1 $f6, (8 * 41)($4)
  ldc1 $f7, (8 * 42)($4)
  ldc1 $f8, (8 * 43)($4)
  ldc1 $f9, (8 * 44)($4)
  ldc1 $f10, (8 * 45)($4)
  ldc1 $f11, (8 * 46)($4)
  ldc1 $f12, (8 * 47)($4)
  ldc1 $f13, (8 * 48)($4)
  ldc1 $f14, (8 * 49)($4)
  ldc1 $f15, (8 * 50)($4)
  ldc1 $f16, (8 * 51)($4)
  ldc1 $f17, (8 * 52)($4)
  ldc1 $f18, (8 * 53)($4)
  ldc1 $f19, (8 * 54)($4)
  ldc1 $f20, (8 * 55)($4)
  ldc1 $f21, (8 * 56)($4)
  ldc1 $f22, (8 * 57)($4)
  ldc1 $f23, (8 * 58)($4)
  ldc1 $f24, (8 * 59)($4)
  ldc1 $f25, (8 * 60)($4)
  ldc1 $f26, (8 * 61)($4)
  ldc1 $f27, (8 * 62)($4)
  ldc1 $f28, (8 * 63)($4)
  ldc1 $f29, (8 * 64)($4)
  ldc1 $f30, (8 * 65)($4)
  ldc1 $f31, (8 * 66)($4)
  // restore hi and lo
  ld $10, (8 * 10)($4)
  ld $11, (8 * 11)($4)
  ld $12, (8 * 12)($4)
  ld $13, (8 * 13)($4)
  ld $14, (8 * 14)($4)
  ld $15, (8 * 15)($4)
  ld $16, (8 * 16)($4)
  ld $17, (8 * 17)($4)
  ld $18, (8 * 18)($4)
  ld $19, (8 * 19)($4)
  ld $20, (8 * 20)($4)
  ld $21, (8 * 21)($4)
  ld $22, (8 * 22)($4)
  ld $23, (8 * 23)($4)
  ld $24, (8 * 24)($4)
  ld $25, (8 * 25)($4)
  ld $26, (8 * 26)($4)
  ld $27, (8 * 27)($4)
  ld $28, (8 * 28)($4)
  ld $29, (8 * 29)($4)
  ld $30, (8 * 30)($4)
  // load new pc into ra
  ld $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
#elif defined(__sparc__)
// void libunwind::Registers_sparc::jumpto()
// thread_state pointer is in o0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
#elif defined(__riscv) && __riscv_xlen == 64
// void libunwind::Registers_riscv::jumpto()
// thread_state pointer is in a0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
#if defined(__riscv_flen) && __riscv_flen == 64
  fld f0, (8 * 32 + 8 * 0)(a0)
  fld f1, (8 * 32 + 8 * 1)(a0)
  fld f2, (8 * 32 + 8 * 2)(a0)
  fld f3, (8 * 32 + 8 * 3)(a0)
  fld f4, (8 * 32 + 8 * 4)(a0)
  fld f5, (8 * 32 + 8 * 5)(a0)
  fld f6, (8 * 32 + 8 * 6)(a0)
  fld f7, (8 * 32 + 8 * 7)(a0)
  fld f8, (8 * 32 + 8 * 8)(a0)
  fld f9, (8 * 32 + 8 * 9)(a0)
  fld f10, (8 * 32 + 8 * 10)(a0)
  fld f11, (8 * 32 + 8 * 11)(a0)
  fld f12, (8 * 32 + 8 * 12)(a0)
  fld f13, (8 * 32 + 8 * 13)(a0)
  fld f14, (8 * 32 + 8 * 14)(a0)
  fld f15, (8 * 32 + 8 * 15)(a0)
  fld f16, (8 * 32 + 8 * 16)(a0)
  fld f17, (8 * 32 + 8 * 17)(a0)
  fld f18, (8 * 32 + 8 * 18)(a0)
  fld f19, (8 * 32 + 8 * 19)(a0)
  fld f20, (8 * 32 + 8 * 20)(a0)
  fld f21, (8 * 32 + 8 * 21)(a0)
  fld f22, (8 * 32 + 8 * 22)(a0)
  fld f23, (8 * 32 + 8 * 23)(a0)
  fld f24, (8 * 32 + 8 * 24)(a0)
  fld f25, (8 * 32 + 8 * 25)(a0)
  fld f26, (8 * 32 + 8 * 26)(a0)
  fld f27, (8 * 32 + 8 * 27)(a0)
  fld f28, (8 * 32 + 8 * 28)(a0)
  fld f29, (8 * 32 + 8 * 29)(a0)
  fld f30, (8 * 32 + 8 * 30)(a0)
  fld f31, (8 * 32 + 8 * 31)(a0)
  ld x1, (8 * 0)(a0)  // restore pc into ra
  ld x11, (8 * 11)(a0)
  ld x12, (8 * 12)(a0)
  ld x13, (8 * 13)(a0)
  ld x14, (8 * 14)(a0)
  ld x15, (8 * 15)(a0)
  ld x16, (8 * 16)(a0)
  ld x17, (8 * 17)(a0)
  ld x18, (8 * 18)(a0)
  ld x19, (8 * 19)(a0)
  ld x20, (8 * 20)(a0)
  ld x21, (8 * 21)(a0)
  ld x22, (8 * 22)(a0)
  ld x23, (8 * 23)(a0)
  ld x24, (8 * 24)(a0)
  ld x25, (8 * 25)(a0)
  ld x26, (8 * 26)(a0)
  ld x27, (8 * 27)(a0)
  ld x28, (8 * 28)(a0)
  ld x29, (8 * 29)(a0)
  ld x30, (8 * 30)(a0)
  ld x31, (8 * 31)(a0)
  ld x10, (8 * 10)(a0)  // restore a0
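  // a0 (x10) is restored last because it holds the pointer to the saved
  // context that every preceding load reads from; the saved pc was placed in
  // ra above so the final jump can resume execution in the target frame.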
#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE