//===----------------------------------------------------------------------===//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//

#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31

#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63
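
// The FROM_* index lists above are intended for the .irp loops further down
// that restore whole register banks (e.g. blocks of floating-point or vector
// registers) without spelling out every register number per architecture.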

#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__)

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto)
# extern "C" void __libunwind_Registers_x86_jumpto(Registers_x86 *);
#
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  +-----------------------+   <-- SP
#
# set up eax and ret on new stack location
  movl   28(%eax), %edx         # edx holds new stack pointer
# ret and eax are now pushed onto the location of the new stack
# restore all registers
  pop    %eax                   # eax was already pushed on new stack

#elif defined(__x86_64__)

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_64_jumpto)
# extern "C" void __libunwind_Registers_x86_64_jumpto(Registers_x86_64 *);
#
# On entry, thread_state pointer is in rcx; move it into rdi
# to share restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on unix even though win64 normally
# mustn't clobber some of them.
#
# On entry, thread_state pointer is in rdi
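# Note: the fixed offsets used below (56 for the saved RSP, 128 for the saved
# RIP, 176 and up for xmm0-xmm15) are assumed to mirror the field layout of
# Registers_x86_64; they are not derived here.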
  movq   56(%rdi), %rax         # rax holds new stack pointer
  movq   32(%rdi), %rbx         # store new rdi on new stack
  movq   128(%rdi), %rbx        # store new rip on new stack
# restore all registers
  movdqu 176(%rdi), %xmm0
  movdqu 192(%rdi), %xmm1
  movdqu 208(%rdi), %xmm2
  movdqu 224(%rdi), %xmm3
  movdqu 240(%rdi), %xmm4
  movdqu 256(%rdi), %xmm5
  movdqu 272(%rdi), %xmm6
  movdqu 288(%rdi), %xmm7
  movdqu 304(%rdi), %xmm8
  movdqu 320(%rdi), %xmm9
  movdqu 336(%rdi), %xmm10
  movdqu 352(%rdi), %xmm11
  movdqu 368(%rdi), %xmm12
  movdqu 384(%rdi), %xmm13
  movdqu 400(%rdi), %xmm14
  movdqu 416(%rdi), %xmm15

  movq   56(%rdi), %rsp         # cut back rsp to new location
  pop    %rdi                   # rdi was saved here earlier

#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
// void libunwind::Registers_ppc64::jumpto()
// thread_state pointer is in r3

// load register (GPR)
#define PPC64_LR(n) \
  ld    n, (8 * (n + 2))(3)
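// (GPR n lives at offset 8 * (n + 2) in the context: the "+ 2" skips the two
// doublewords that precede the GPR array in the saved register layout.)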

// restore integral registers

// restore VS registers
// (note that this also restores floating point registers and V registers,
// because part of VS is mapped to these registers)

  addi  4, 3, PPC64_OFFS_FP

#ifdef __LITTLE_ENDIAN__
// For little-endian targets, we need a swap since lxvd2x will load the register
// in the incorrect doubleword order.
// FIXME: when supporting targets older than Power9 on LE is no longer required,
// this can be changed to simply `lxv n, (16 * n)(4)`.
#define PPC64_LVS(n) \
#define PPC64_LVS(n) \

// restore the first 32 VS regs (and also all floating point regs)

#ifdef __LITTLE_ENDIAN__
#define PPC64_CLVS_RESTORE(n) \
  addi  4, 3, PPC64_OFFS_FP + n * 16 ;\
#define PPC64_CLVS_RESTORE(n) \
  addi  4, 3, PPC64_OFFS_FP + n * 16 ;\

// use VRSAVE to conditionally restore the remaining VS regs, which are
// where the V regs are mapped. In the AIX ABI, VRSAVE is not used.
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
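// Each set bit in the 32-bit VRSAVE mask marks the corresponding vector
// register as potentially live; the andis./andi. tests below check the upper
// and lower halves of that mask so dead registers can be skipped.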
// conditionally load VS
#define PPC64_CLVSl(n) \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n)) ;\
  PPC64_CLVS_RESTORE(n) ;\
#define PPC64_CLVSh(n) \
  andi. 0, 5, (1 PPC_LEFT_SHIFT(63-n)) ;\
  PPC64_CLVS_RESTORE(n) ;\
#define PPC64_CLVSl(n) PPC64_CLVS_RESTORE(n)
#define PPC64_CLVSh(n) PPC64_CLVS_RESTORE(n)
#endif // !defined(_AIX)

#define PPC64_LF(n) \
  lfd   n, (PPC64_OFFS_FP + n * 16)(3)
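// (FP registers share the VS/vector save area noted above, so each saved FP
// value is 16 bytes apart rather than 8.)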

// restore float registers

#if defined(__ALTIVEC__)

#define PPC64_CLV_UNALIGNED_RESTORE(n) \
  ld    0, (PPC64_OFFS_V + n * 16)(3) ;\
  ld    0, (PPC64_OFFS_V + n * 16 + 8)(3) ;\

// restore vector registers if any are in use. In the AIX ABI, VRSAVE is
// not used.
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave

#define PPC64_CLV_UNALIGNEDl(n) \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-n)) ;\
  PPC64_CLV_UNALIGNED_RESTORE(n) ;\
#define PPC64_CLV_UNALIGNEDh(n) \
  andi. 0, 5, (1 PPC_LEFT_SHIFT(31-n)) ;\
  PPC64_CLV_UNALIGNED_RESTORE(n) ;\
#define PPC64_CLV_UNALIGNEDl(n) PPC64_CLV_UNALIGNED_RESTORE(n)
#define PPC64_CLV_UNALIGNEDh(n) PPC64_CLV_UNALIGNED_RESTORE(n)

#endif // !defined(_AIX)

// r4 is now a 16-byte aligned pointer into the red zone
// the _vectorScalarRegisters may not be 16-byte aligned
// so copy via red zone temp buffer

  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)

  ld    0, PPC64_OFFS_CR(3)
  ld    0, PPC64_OFFS_SRR0(3)

// After GPR1 is set to a higher address, AIX discards the original stack
// space below that address, which is where the context pointed to by GPR3
// lives. Save the context's GPR3 value in GPR0 before it is wiped out; this
// clobbers GPR0, which is a volatile register.
  ld    0, (8 * (3 + 2))(3)

#elif defined(__powerpc__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
// void libunwind::Registers_ppc::jumpto()
// thread_state pointer is in r3

// restore integral registers

// restore float registers

#if defined(__ALTIVEC__)
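
// The macro below stages one saved vector, word by word, into the 16-byte
// aligned red-zone buffer addressed by r4 so that it can then be loaded into
// the target vector register with an aligned load.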
#define LOAD_VECTOR_RESTORE(_index) \
  lwz   0, 424+_index*16(3) SEPARATOR \
  stw   0, 0(4) SEPARATOR \
  lwz   0, 424+_index*16+4(3) SEPARATOR \
  stw   0, 4(4) SEPARATOR \
  lwz   0, 424+_index*16+8(3) SEPARATOR \
  stw   0, 8(4) SEPARATOR \
  lwz   0, 424+_index*16+12(3) SEPARATOR \
  stw   0, 12(4) SEPARATOR \

// restore vector registers if any are in use. In the AIX ABI, VRSAVE is
// not used.
  lwz   5, 156(3)               // test VRsave

#define LOAD_VECTOR_UNALIGNEDl(_index) \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-_index)) SEPARATOR \
  beq   Ldone ## _index SEPARATOR \
  LOAD_VECTOR_RESTORE(_index) SEPARATOR \
#define LOAD_VECTOR_UNALIGNEDh(_index) \
  andi. 0, 5, (1 PPC_LEFT_SHIFT(31-_index)) SEPARATOR \
  beq   Ldone ## _index SEPARATOR \
  LOAD_VECTOR_RESTORE(_index) SEPARATOR \
#define LOAD_VECTOR_UNALIGNEDl(_index) LOAD_VECTOR_RESTORE(_index)
#define LOAD_VECTOR_UNALIGNEDh(_index) LOAD_VECTOR_RESTORE(_index)

#endif // !defined(_AIX)

  rlwinm 4, 4, 0, 0, 27         // clear low 4 bits (16-byte align r4)
// r4 is now a 16-byte aligned pointer into the red zone
// the _vectorRegisters may not be 16-byte aligned, so copy via red zone temp buffer
  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)

  lwz   0, 136(3)               // __cr
  lwz   0, 148(3)               // __ctr
  lwz   0, 0(3)                 // __srr0
  lwz   0, 8(3)                 // do r0 now
  lwz   5, 28(3)                // do r5 now
  lwz   4, 24(3)                // do r4 now
  lwz   1, 12(3)                // do sp now
  lwz   3, 20(3)                // do r3 last

#elif defined(__aarch64__)

#if defined(__ARM_FEATURE_GCS_DEFAULT)

// extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *);
// thread_state pointer is in x0

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto)
  // skip restore of x0,x1 for now
  ldp   x2, x3,   [x0, #0x010]
  ldp   x4, x5,   [x0, #0x020]
  ldp   x6, x7,   [x0, #0x030]
  ldp   x8, x9,   [x0, #0x040]
  ldp   x10, x11, [x0, #0x050]
  ldp   x12, x13, [x0, #0x060]
  ldp   x14, x15, [x0, #0x070]
  // x16 and x17 were clobbered by the call into the unwinder, so no point in
  // restoring them.
  ldp   x18, x19, [x0, #0x090]
  ldp   x20, x21, [x0, #0x0A0]
  ldp   x22, x23, [x0, #0x0B0]
  ldp   x24, x25, [x0, #0x0C0]
  ldp   x26, x27, [x0, #0x0D0]
  ldp   x28, x29, [x0, #0x0E0]
  ldr   x30,      [x0, #0x100]  // restore pc into lr
#if defined(__ARM_FP) && __ARM_FP != 0
  ldp   d0, d1,   [x0, #0x110]
  ldp   d2, d3,   [x0, #0x120]
  ldp   d4, d5,   [x0, #0x130]
  ldp   d6, d7,   [x0, #0x140]
  ldp   d8, d9,   [x0, #0x150]
  ldp   d10, d11, [x0, #0x160]
  ldp   d12, d13, [x0, #0x170]
  ldp   d14, d15, [x0, #0x180]
  ldp   d16, d17, [x0, #0x190]
  ldp   d18, d19, [x0, #0x1A0]
  ldp   d20, d21, [x0, #0x1B0]
  ldp   d22, d23, [x0, #0x1C0]
  ldp   d24, d25, [x0, #0x1D0]
  ldp   d26, d27, [x0, #0x1E0]
  ldp   d28, d29, [x0, #0x1F0]
  ldr   d30,      [x0, #0x200]
  ldr   d31,      [x0, #0x208]

  // Finally, restore sp. This must be done after the last read from the
  // context struct, because it is allocated on the stack, and an exception
  // could clobber the de-allocated portion of the stack after sp has been
  // restored.
  ldr   x16,      [x0, #0x0F8]
  ldp   x0, x1,   [x0, #0x000]  // restore x0,x1
  mov   sp, x16                 // restore sp
#if defined(__ARM_FEATURE_GCS_DEFAULT)
  // If GCS is enabled we need to push the address we're returning to onto the
  // GCS stack. We can't just return using br, as there won't be a BTI landing
  // pad instruction at the destination.
  ret   x30                     // jump to pc

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)

@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@ thread_state pointer is in r0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  mov   lr, r3                  @ restore pc into lr
  @ Use lr as base so that r0 can be restored.
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldr   lr, [lr, #60]           @ restore pc into lr
#if defined(__ARM_FEATURE_BTI_DEFAULT) && !defined(__ARM_ARCH_ISA_ARM)
  // 'bx' is not BTI setting when used with lr, therefore r12 is used instead

@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do
  @ not want the compiler to generate instructions that access those registers),
  @ but this is only reached if the personality routine needs these registers.
  @ Use of these registers implies they are actually available on the target,
  @ so it is OK to execute them here.
  @ So, generate the instructions using the corresponding coprocessor mnemonic.

@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15}           @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia

@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)

#if defined(__ARM_WMMX)

@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl  p1, cr0,  [r0], #8      @ wldrd wR0, [r0], #8
  ldcl  p1, cr1,  [r0], #8      @ wldrd wR1, [r0], #8
  ldcl  p1, cr2,  [r0], #8      @ wldrd wR2, [r0], #8
  ldcl  p1, cr3,  [r0], #8      @ wldrd wR3, [r0], #8
  ldcl  p1, cr4,  [r0], #8      @ wldrd wR4, [r0], #8
  ldcl  p1, cr5,  [r0], #8      @ wldrd wR5, [r0], #8
  ldcl  p1, cr6,  [r0], #8      @ wldrd wR6, [r0], #8
  ldcl  p1, cr7,  [r0], #8      @ wldrd wR7, [r0], #8
  ldcl  p1, cr8,  [r0], #8      @ wldrd wR8, [r0], #8
  ldcl  p1, cr9,  [r0], #8      @ wldrd wR9, [r0], #8
  ldcl  p1, cr10, [r0], #8      @ wldrd wR10, [r0], #8
  ldcl  p1, cr11, [r0], #8      @ wldrd wR11, [r0], #8
  ldcl  p1, cr12, [r0], #8      @ wldrd wR12, [r0], #8
  ldcl  p1, cr13, [r0], #8      @ wldrd wR13, [r0], #8
  ldcl  p1, cr14, [r0], #8      @ wldrd wR14, [r0], #8
  ldcl  p1, cr15, [r0], #8      @ wldrd wR15, [r0], #8

@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2  p1, cr8,  [r0], #4      @ wldrw wCGR0, [r0], #4
  ldc2  p1, cr9,  [r0], #4      @ wldrw wCGR1, [r0], #4
  ldc2  p1, cr10, [r0], #4      @ wldrw wCGR2, [r0], #4
  ldc2  p1, cr11, [r0], #4      @ wldrw wCGR3, [r0], #4

#elif defined(__or1k__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
# void libunwind::Registers_or1k::jumpto()
# thread_state pointer is in r3

# restore integral registers

# load new pc into ra
# at last, restore r3

#elif defined(__hexagon__)

# thread_state pointer is in r2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_hexagon6jumptoEv)
# void libunwind::Registers_hexagon::jumpto()

  c4 = r1                       // Predicate register

#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

// void libunwind::Registers_mips_o32::jumpto()
// thread state pointer is in a0 ($4)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
#ifdef __mips_hard_float
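// Two restore sequences follow: when the FPU pairs 32-bit registers (so each
// double occupies an even/odd register pair), only the even-numbered registers
// need to be reloaded; with 64-bit FP registers, all 32 are restored.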
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f1, (4 * 36 + 8 * 1)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f3, (4 * 36 + 8 * 3)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f5, (4 * 36 + 8 * 5)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f7, (4 * 36 + 8 * 7)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f9, (4 * 36 + 8 * 9)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f11, (4 * 36 + 8 * 11)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f13, (4 * 36 + 8 * 13)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f15, (4 * 36 + 8 * 15)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f17, (4 * 36 + 8 * 17)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f19, (4 * 36 + 8 * 19)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f21, (4 * 36 + 8 * 21)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f23, (4 * 36 + 8 * 23)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f25, (4 * 36 + 8 * 25)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f27, (4 * 36 + 8 * 27)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f29, (4 * 36 + 8 * 29)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f31, (4 * 36 + 8 * 31)($4)

#if __mips_isa_rev < 6
  // restore hi and lo

  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  lw    $30, (4 * 30)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot

#elif defined(__mips64)

// void libunwind::Registers_mips_newabi::jumpto()
// thread state pointer is in a0 ($4)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
#ifdef __mips_hard_float
    ldc1  $f\i, (280+8*\i)($4)

#if __mips_isa_rev < 6
  // restore hi and lo

  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
    ld    $\i, (8 * \i)($4)

  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot

#elif defined(__sparc__) && defined(__arch64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_sparc646jumptoEv)
// void libunwind::Registers_sparc64::jumpto()
// thread_state pointer is in %o0

  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch
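  // The .register directives above tell the assembler that the reserved
  // globals %g2/%g3/%g6/%g7 are used here as scratch registers.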
  ldx   [%o0 + 0x08], %g1
  ldx   [%o0 + 0x10], %g2
  ldx   [%o0 + 0x18], %g3
  ldx   [%o0 + 0x20], %g4
  ldx   [%o0 + 0x28], %g5
  ldx   [%o0 + 0x30], %g6
  ldx   [%o0 + 0x38], %g7
  ldx   [%o0 + 0x48], %o1
  ldx   [%o0 + 0x50], %o2
  ldx   [%o0 + 0x58], %o3
  ldx   [%o0 + 0x60], %o4
  ldx   [%o0 + 0x68], %o5
  ldx   [%o0 + 0x70], %o6
  ldx   [%o0 + 0x78], %o7
  ldx   [%o0 + 0x80], %l0
  ldx   [%o0 + 0x88], %l1
  ldx   [%o0 + 0x90], %l2
  ldx   [%o0 + 0x98], %l3
  ldx   [%o0 + 0xa0], %l4
  ldx   [%o0 + 0xa8], %l5
  ldx   [%o0 + 0xb0], %l6
  ldx   [%o0 + 0xb8], %l7
  ldx   [%o0 + 0xc0], %i0
  ldx   [%o0 + 0xc8], %i1
  ldx   [%o0 + 0xd0], %i2
  ldx   [%o0 + 0xd8], %i3
  ldx   [%o0 + 0xe0], %i4
  ldx   [%o0 + 0xe8], %i5
  ldx   [%o0 + 0xf0], %i6
  ldx   [%o0 + 0xf8], %i7

  ldx   [%o0 + 0x40], %o0

#elif defined(__sparc__)

// void libunwind::Registers_sparc_o32::jumpto()
// thread_state pointer is in o0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
  ldd   [%o0 + 104], %i2
  ldd   [%o0 + 112], %i4
  ldd   [%o0 + 120], %i6

#elif defined(__riscv)

// void libunwind::Registers_riscv::jumpto()
// thread_state pointer is in a0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
# if defined(__riscv_flen)
    FLOAD f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)

  ILOAD x1, (RISCV_ISIZE * 0)(a0)    // restore pc into ra
  .irp i,2,3,4,5,6,7,8,9
    ILOAD x\i, (RISCV_ISIZE * \i)(a0)

#if defined(__riscv_32e)
  .irp i,11,12,13,14,15
  .irp i,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    ILOAD x\i, (RISCV_ISIZE * \i)(a0)

  ILOAD x10, (RISCV_ISIZE * 10)(a0)  // restore a0
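  // a0 is restored last because it holds the context pointer that every load
  // above indexes from.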

#elif defined(__s390x__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_s390x6jumptoEv)
// void libunwind::Registers_s390x::jumpto()
// thread_state pointer is in r2

  // Skip PSWM, but load PSWA into r1

    ld    %f\i, (144+8*\i)(%r2)

  // Restore GPRs - skipping %r0 and %r1
  lmg   %r2, %r15, 32(%r2)
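  // (lmg computes its source address from the old %r2 before writing any
  // register, so it is safe for the load to overwrite %r2 itself.)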

  // Return to PSWA (was loaded into %r1 above)

#elif defined(__loongarch__) && __loongarch_grlen == 64

// void libunwind::Registers_loongarch::jumpto()
// thread_state pointer is in $a0($r4)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind19Registers_loongarch6jumptoEv)
# if __loongarch_frlen == 64
    fld.d $f\i, $a0, (8 * 33 + 8 * \i)

    ld.d  $r\i, $a0, (8 * \i)

  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    ld.d  $r\i, $a0, (8 * \i)

  ld.d  $ra, $a0, (8 * 32)        // load new pc into $ra
  ld.d  $a0, $a0, (8 * 4)         // restore $a0 last

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) */

NO_EXEC_STACK_DIRECTIVE