//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31

#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63
#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto)
# extern "C" void __libunwind_Registers_x86_jumpto(Registers_x86 *);
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  +-----------------------+   <-- SP
  # set up eax and ret on new stack location
  movl  28(%eax), %edx # edx holds new stack pointer
  # the return address and eax are now pushed where the new stack will be
  # restore all registers
  pop   %eax # eax was already pushed on new stack
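  # (the function ends with a plain ret, which pops the eip that was pushed
  # above and resumes execution in the restored context)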
#elif defined(__x86_64__)

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_64_jumpto)
# extern "C" void __libunwind_Registers_x86_64_jumpto(Registers_x86_64 *);
#if defined(_WIN64)
# On entry, thread_state pointer is in rcx; move it into rdi
# to share restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on unix even though win64 normally
# mustn't clobber some of them.
  movq %rcx, %rdi
#else
# On entry, thread_state pointer is in rdi
#endif
  movq  56(%rdi), %rax # rax holds new stack pointer
  movq  32(%rdi), %rbx # store new rdi on new stack
  movq 128(%rdi), %rbx # store new rip on new stack
  # restore all registers
  movdqu 176(%rdi), %xmm0
  movdqu 192(%rdi), %xmm1
  movdqu 208(%rdi), %xmm2
  movdqu 224(%rdi), %xmm3
  movdqu 240(%rdi), %xmm4
  movdqu 256(%rdi), %xmm5
  movdqu 272(%rdi), %xmm6
  movdqu 288(%rdi), %xmm7
  movdqu 304(%rdi), %xmm8
  movdqu 320(%rdi), %xmm9
  movdqu 336(%rdi), %xmm10
  movdqu 352(%rdi), %xmm11
  movdqu 368(%rdi), %xmm12
  movdqu 384(%rdi), %xmm13
  movdqu 400(%rdi), %xmm14
  movdqu 416(%rdi), %xmm15
  movq  56(%rdi), %rsp # cut back rsp to new location
  pop   %rdi           # rdi was saved here earlier
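  # (likewise, the final ret pops the new rip that was stored at the top of
  # the new stack above)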
#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
// void libunwind::Registers_ppc64::jumpto()
// thread_state pointer is in r3

// load register (GPR)
#define PPC64_LR(n) \
  ld n, (8 * (n + 2))(3)
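// For example, PPC64_LR(30) expands to `ld 30, 256(3)`: GPR30 is loaded from
// offset 8 * (30 + 2) of the context that r3 points to (the GPR array starts
// two doublewords in).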
// restore integral registers

// restore VS registers
// (note that this also restores floating point registers and V registers,
// because part of VS is mapped to these registers)

  addi 4, 3, PPC64_OFFS_FP
#ifdef __LITTLE_ENDIAN__
// For little-endian targets, we need a swap since lxvd2x will load the register
// in the incorrect doubleword order.
// FIXME: when supporting targets older than Power9 on LE is no longer required,
// this can be changed to simply `lxv n, (16 * n)(4)`.
#define PPC64_LVS(n)       \
  lxvd2x  n, 0, 4         ;\
  xxswapd n, n            ;\
  addi    4, 4, 16
#else
#define PPC64_LVS(n)       \
  lxvd2x  n, 0, 4         ;\
  addi    4, 4, 16
#endif
// restore the first 32 VS regs (and also all floating point regs)

#ifdef __LITTLE_ENDIAN__
#define PPC64_CLVS_RESTORE(n)            \
  addi    4, 3, PPC64_OFFS_FP + n * 16  ;\
  lxvd2x  n, 0, 4                       ;\
  xxswapd n, n
#else
#define PPC64_CLVS_RESTORE(n)            \
  addi    4, 3, PPC64_OFFS_FP + n * 16  ;\
  lxvd2x  n, 0, 4
#endif
#if !defined(_AIX)
// use VRSAVE to conditionally restore the remaining VS regs, which are
// where the V regs are mapped. In the AIX ABI, VRSAVE is not used.
  ld 5, PPC64_OFFS_VRSAVE(3) // test VRSAVE
// conditionally load VS
#define PPC64_CLVSl(n)                    \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n))  ;\
  beq    Ldone ## n                      ;\
  PPC64_CLVS_RESTORE(n)                  ;\
Ldone ## n:

#define PPC64_CLVSh(n)                    \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(63-n))  ;\
  beq    Ldone ## n                      ;\
  PPC64_CLVS_RESTORE(n)                  ;\
Ldone ## n:
#else
#define PPC64_CLVSl(n) PPC64_CLVS_RESTORE(n)
#define PPC64_CLVSh(n) PPC64_CLVS_RESTORE(n)
#endif // !defined(_AIX)
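// For example, PPC64_CLVSl(32) expands to `andis. 0, 5, 0x8000` (a shift of
// 47 - 32 = 15), which tests the most-significant VRSAVE bit, i.e. v0, the
// V register aliased to vs32; the restore is skipped when the bit is clear.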
#define PPC64_LF(n) \
  lfd n, (PPC64_OFFS_FP + n * 16)(3)
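// The stride is 16 bytes rather than 8 because each FP register occupies the
// first half of a VS slot; e.g. PPC64_LF(2) expands to
// `lfd 2, (PPC64_OFFS_FP + 32)(3)`.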
// restore float registers

#if defined(__ALTIVEC__)

#define PPC64_CLV_UNALIGNED_RESTORE(n)     \
  ld  0, (PPC64_OFFS_V + n * 16)(3)       ;\
  std 0, 0(4)                             ;\
  ld  0, (PPC64_OFFS_V + n * 16 + 8)(3)   ;\
  std 0, 8(4)                             ;\
  lvx n, 0, 4
// restore vector registers if any are in use. In the AIX ABI, VRSAVE is
// not used.
#if !defined(_AIX)
  ld 5, PPC64_OFFS_VRSAVE(3) // test VRSAVE
#define PPC64_CLV_UNALIGNEDl(n)           \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-n))  ;\
  beq    Ldone ## n                      ;\
  PPC64_CLV_UNALIGNED_RESTORE(n)         ;\
Ldone ## n:

#define PPC64_CLV_UNALIGNEDh(n)           \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(31-n))  ;\
  beq    Ldone ## n                      ;\
  PPC64_CLV_UNALIGNED_RESTORE(n)         ;\
Ldone ## n:
#else
#define PPC64_CLV_UNALIGNEDl(n) PPC64_CLV_UNALIGNED_RESTORE(n)
#define PPC64_CLV_UNALIGNEDh(n) PPC64_CLV_UNALIGNED_RESTORE(n)
#endif // !defined(_AIX)
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorScalarRegisters may not be 16-byte aligned
  // so copy via red zone temp buffer
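  // (lvx ignores the low four bits of its effective address, so loading
  // directly from an unaligned source would silently read the wrong bytes)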
  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)
  ld 0, PPC64_OFFS_CR(3)
  ld 0, PPC64_OFFS_SRR0(3)
  // After setting GPR1 to a higher address, AIX wipes out the original
  // stack space below that address, since it is invalidated by the new
  // GPR1 value. Use GPR0 to save the value of GPR3 in the context before
  // it is wiped out. This compromises the content of GPR0, which is a
  // volatile register.
  ld 0, (8 * (3 + 2))(3)
#elif defined(__powerpc__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
// void libunwind::Registers_ppc::jumpto()
// thread_state pointer is in r3

// restore integral registers

// restore float registers
#if defined(__ALTIVEC__)

#define LOAD_VECTOR_RESTORE(_index)          \
  lwz 0, 424+_index*16(3)     SEPARATOR      \
  stw 0, 0(4)                 SEPARATOR      \
  lwz 0, 424+_index*16+4(3)   SEPARATOR      \
  stw 0, 4(4)                 SEPARATOR      \
  lwz 0, 424+_index*16+8(3)   SEPARATOR      \
  stw 0, 8(4)                 SEPARATOR      \
  lwz 0, 424+_index*16+12(3)  SEPARATOR      \
  stw 0, 12(4)                SEPARATOR      \
  lvx _index, 0, 4
// restore vector registers if any are in use. In the AIX ABI, VRSAVE
// is not used.
#if !defined(_AIX)
  lwz 5, 156(3) // test VRSAVE
#define LOAD_VECTOR_UNALIGNEDl(_index)                  \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-_index)) SEPARATOR  \
  beq Ldone ## _index                        SEPARATOR  \
  LOAD_VECTOR_RESTORE(_index)                SEPARATOR  \
Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index)                  \
  andi. 0, 5, (1 PPC_LEFT_SHIFT(31-_index))  SEPARATOR  \
  beq Ldone ## _index                        SEPARATOR  \
  LOAD_VECTOR_RESTORE(_index)                SEPARATOR  \
Ldone ## _index:
#else
#define LOAD_VECTOR_UNALIGNEDl(_index) LOAD_VECTOR_RESTORE(_index)
#define LOAD_VECTOR_UNALIGNEDh(_index) LOAD_VECTOR_RESTORE(_index)
#endif // !defined(_AIX)
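// For example, LOAD_VECTOR_UNALIGNEDl(0) expands to `andis. 0, 5, 0x8000`,
// which tests VRSAVE's most-significant bit (v0) and skips the restore when
// that vector register is not live.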
  rlwinm 4, 4, 0, 0, 27 // mask off the low 4 bits
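  // (a rotate of 0 with mask bits 0-27 keeps the high 28 bits, rounding r4
  // down to a 16-byte boundary)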
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)
  lwz 0, 136(3) // __cr
  lwz 0, 148(3) // __ctr
  lwz 0, 0(3)   // __ssr0
  lwz 0, 8(3)   // do r0 now
  lwz 5, 28(3)  // do r5 now
  lwz 4, 24(3)  // do r4 now
  lwz 1, 12(3)  // do sp now
  lwz 3, 20(3)  // do r3 last
#elif defined(__aarch64__)

// extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *);
// thread_state pointer is in x0

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto)
  // skip restore of x0,x1 for now
  ldp x2, x3,  [x0, #0x010]
  ldp x4, x5,  [x0, #0x020]
  ldp x6, x7,  [x0, #0x030]
  ldp x8, x9,  [x0, #0x040]
  ldp x10,x11, [x0, #0x050]
  ldp x12,x13, [x0, #0x060]
  ldp x14,x15, [x0, #0x070]
  // x16 and x17 were clobbered by the call into the unwinder, so no point in
  // restoring them.
  ldp x18,x19, [x0, #0x090]
  ldp x20,x21, [x0, #0x0A0]
  ldp x22,x23, [x0, #0x0B0]
  ldp x24,x25, [x0, #0x0C0]
  ldp x26,x27, [x0, #0x0D0]
  ldp x28,x29, [x0, #0x0E0]
  ldr x30, [x0, #0x100] // restore pc into lr
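  // (0x100 is the pc slot, 8 * 32; the saved lr at 0xF0 is not reloaded, so
  // the final `ret x30` branches to the target pc)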
  ldp d0, d1,  [x0, #0x110]
  ldp d2, d3,  [x0, #0x120]
  ldp d4, d5,  [x0, #0x130]
  ldp d6, d7,  [x0, #0x140]
  ldp d8, d9,  [x0, #0x150]
  ldp d10,d11, [x0, #0x160]
  ldp d12,d13, [x0, #0x170]
  ldp d14,d15, [x0, #0x180]
  ldp d16,d17, [x0, #0x190]
  ldp d18,d19, [x0, #0x1A0]
  ldp d20,d21, [x0, #0x1B0]
  ldp d22,d23, [x0, #0x1C0]
  ldp d24,d25, [x0, #0x1D0]
  ldp d26,d27, [x0, #0x1E0]
  ldp d28,d29, [x0, #0x1F0]
  ldr d30,     [x0, #0x200]
  ldr d31,     [x0, #0x208]
  // Finally, restore sp. This must be done after the last read from the
  // context struct, because it is allocated on the stack, and an exception
  // could clobber the de-allocated portion of the stack after sp has been
  // restored.
  ldr x16, [x0, #0x0F8]
  ldp x0, x1, [x0, #0x000] // restore x0,x1
  mov sp, x16              // restore sp
  ret x30                  // jump to pc
#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@ thread_state pointer is in r0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
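  @ (Thumb-1 ldm can only target the low registers r0-r7, so the high
  @ registers must be staged through low ones)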
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  mov lr, r3 @ restore pc into lr
#else
  @ Use lr as base so that r0 can be restored.
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldr lr, [lr, #60] @ restore pc into lr
#if defined(__ARM_FEATURE_BTI_DEFAULT) && !defined(__ARM_ARCH_ISA_ARM)
  // 'bx' is not a BTI-setting instruction when used with lr, therefore r12 is
  // used instead
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do
  @ not want the compiler to generate instructions that access those), but this
  @ code is only reached if the personality routine needs these registers. Use
  @ of these registers implies they are actually available on the target, so
  @ it's ok to execute them.
  @ So, generate the instruction using the corresponding coprocessor mnemonic.
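  @ (double-precision VFP transfers belong to coprocessor 11, so an FLDMD can
  @ be expressed as a generic ldc-family load to p11)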
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
#if defined(__ARM_WMMX)

@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl p1, cr0,  [r0], #8 @ wldrd wR0,  [r0], #8
  ldcl p1, cr1,  [r0], #8 @ wldrd wR1,  [r0], #8
  ldcl p1, cr2,  [r0], #8 @ wldrd wR2,  [r0], #8
  ldcl p1, cr3,  [r0], #8 @ wldrd wR3,  [r0], #8
  ldcl p1, cr4,  [r0], #8 @ wldrd wR4,  [r0], #8
  ldcl p1, cr5,  [r0], #8 @ wldrd wR5,  [r0], #8
  ldcl p1, cr6,  [r0], #8 @ wldrd wR6,  [r0], #8
  ldcl p1, cr7,  [r0], #8 @ wldrd wR7,  [r0], #8
  ldcl p1, cr8,  [r0], #8 @ wldrd wR8,  [r0], #8
  ldcl p1, cr9,  [r0], #8 @ wldrd wR9,  [r0], #8
  ldcl p1, cr10, [r0], #8 @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8 @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8 @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8 @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8 @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8 @ wldrd wR15, [r0], #8
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8,  [r0], #4 @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9,  [r0], #4 @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4 @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4 @ wldrw wCGR3, [r0], #4
#elif defined(__or1k__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
# void libunwind::Registers_or1k::jumpto()
# thread_state pointer is in r3

# restore integral registers

# load new pc into ra
# at last, restore r3
#elif defined(__hexagon__)

# thread_state pointer is in r2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_hexagon6jumptoEv)
# void libunwind::Registers_hexagon::jumpto()
  c4 = r1   // Predicate register
#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

// void libunwind::Registers_mips_o32::jumpto()
// thread state pointer is in a0 ($4)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
#ifdef __mips_hard_float
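// With 32-bit FPRs, doubles occupy even/odd register pairs, so only the
// even-numbered registers are loaded; the second block below handles the
// 64-bit-FPR case, where all 32 registers hold doubles.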
#if __mips_fpr != 64
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f1, (4 * 36 + 8 * 1)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f3, (4 * 36 + 8 * 3)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f5, (4 * 36 + 8 * 5)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f7, (4 * 36 + 8 * 7)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f9, (4 * 36 + 8 * 9)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f11, (4 * 36 + 8 * 11)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f13, (4 * 36 + 8 * 13)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f15, (4 * 36 + 8 * 15)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f17, (4 * 36 + 8 * 17)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f19, (4 * 36 + 8 * 19)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f21, (4 * 36 + 8 * 21)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f23, (4 * 36 + 8 * 23)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f25, (4 * 36 + 8 * 25)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f27, (4 * 36 + 8 * 27)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f29, (4 * 36 + 8 * 29)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
#if __mips_isa_rev < 6
  // restore hi and lo
  lw    $8, (4 * 33)($4)
  mthi  $8
  lw    $8, (4 * 34)($4)
  mtlo  $8
#endif
  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  lw    $30, (4 * 30)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  lw    $4, (4 * 4)($4)
#elif defined(__mips64)

// void libunwind::Registers_mips_newabi::jumpto()
// thread state pointer is in a0 ($4)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
#ifdef __mips_hard_float
  .irp i,FROM_0_TO_31
    ldc1 $f\i, (280+8*\i)($4)
  .endr
#endif
#if __mips_isa_rev < 6
  // restore hi and lo
  ld    $8, (8 * 33)($4)
  mthi  $8
  ld    $8, (8 * 34)($4)
  mtlo  $8
#endif
  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
    ld $\i, (8 * \i)($4)
  .endr
  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  ld    $4, (8 * 4)($4)
#elif defined(__sparc__) && defined(__arch64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_sparc646jumptoEv)
// void libunwind::Registers_sparc64::jumpto()
// thread_state pointer is in %o0
  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch
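  // (SPARC V9 assemblers require declaring any use of %g2/%g3/%g6/%g7;
  // #scratch marks them as having no ABI-reserved role here)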
  ldx [%o0 + 0x08], %g1
  ldx [%o0 + 0x10], %g2
  ldx [%o0 + 0x18], %g3
  ldx [%o0 + 0x20], %g4
  ldx [%o0 + 0x28], %g5
  ldx [%o0 + 0x30], %g6
  ldx [%o0 + 0x38], %g7
  ldx [%o0 + 0x48], %o1
  ldx [%o0 + 0x50], %o2
  ldx [%o0 + 0x58], %o3
  ldx [%o0 + 0x60], %o4
  ldx [%o0 + 0x68], %o5
  ldx [%o0 + 0x70], %o6
  ldx [%o0 + 0x78], %o7
  ldx [%o0 + 0x80], %l0
  ldx [%o0 + 0x88], %l1
  ldx [%o0 + 0x90], %l2
  ldx [%o0 + 0x98], %l3
  ldx [%o0 + 0xa0], %l4
  ldx [%o0 + 0xa8], %l5
  ldx [%o0 + 0xb0], %l6
  ldx [%o0 + 0xb8], %l7
  ldx [%o0 + 0xc0], %i0
  ldx [%o0 + 0xc8], %i1
  ldx [%o0 + 0xd0], %i2
  ldx [%o0 + 0xd8], %i3
  ldx [%o0 + 0xe0], %i4
  ldx [%o0 + 0xe8], %i5
  ldx [%o0 + 0xf0], %i6
  ldx [%o0 + 0xf8], %i7
  ldx [%o0 + 0x40], %o0
#elif defined(__sparc__)

// void libunwind::Registers_sparc::jumpto()
// thread_state pointer is in o0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
#elif defined(__riscv)

// void libunwind::Registers_riscv::jumpto()
// thread_state pointer is in a0

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
# if defined(__riscv_flen)
  .irp i,FROM_0_TO_31
    FLOAD f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)
  .endr
# endif
  ILOAD x1, (RISCV_ISIZE * 0)(a0) // restore pc into ra
  .irp i,2,3,4,5,6,7,8,9
    ILOAD x\i, (RISCV_ISIZE * \i)(a0)
  .endr
  .irp i,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    ILOAD x\i, (RISCV_ISIZE * \i)(a0)
  .endr
  ILOAD x10, (RISCV_ISIZE * 10)(a0) // restore a0
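  // (a0 is the context pointer, so x10 is restored only after all other
  // loads; the final ret then branches to the pc loaded into ra above)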
#elif defined(__s390x__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_s390x6jumptoEv)
// void libunwind::Registers_s390x::jumpto()
// thread_state pointer is in r2

  // Skip PSWM, but load PSWA into r1
  .irp i,FROM_0_TO_15
    ld %f\i, (144+8*\i)(%r2)
  .endr
  // Restore GPRs - skipping %r0 and %r1
  lmg %r2, %r15, 32(%r2)
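  // (lmg computes its starting address before writing any register, so using
  // %r2 both as the base and as a destination is safe)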
  // Return to PSWA (was loaded into %r1 above)
  br %r1
#elif defined(__loongarch__) && __loongarch_grlen == 64

// void libunwind::Registers_loongarch::jumpto()
// thread_state pointer is in $a0($r4)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind19Registers_loongarch6jumptoEv)
# if __loongarch_frlen == 64
  .irp i,FROM_0_TO_31
    fld.d $f\i, $a0, (8 * 33 + 8 * \i)
  .endr
# endif
  .irp i,1,2,3
    ld.d $r\i, $a0, (8 * \i)
  .endr
  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    ld.d $r\i, $a0, (8 * \i)
  .endr
  ld.d $ra, $a0, (8 * 32) // load new pc into $ra
  ld.d $a0, $a0, (8 * 4)  // restore $a0 last
  jr   $ra
#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE