1 //===------------------------ UnwindRegistersSave.S -----------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
13 #if !defined(__USING_SJLJ_EXCEPTIONS__)
18 # extern int __unw_getcontext(unw_context_t* thread_state)
22 # +-----------------------+
23 # + thread_state pointer  +
24 # +-----------------------+
26 # +-----------------------+ <-- SP
# i386: capture the caller's register state into the unw_context_t whose
# address was passed on the stack (diagram above).  By the time the stores
# below run, %eax is assumed to hold the thread_state pointer and %edx is a
# scratch register — the setup instructions are not visible in this excerpt;
# confirm against the full file.
29 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
40 movl %edx, 28(%eax) # store what sp was at call site as esp
44 movl %edx, 40(%eax) # store return address as eip
51 movl %edx, (%eax) # store original eax
# Zero the return register last, after the original %eax has been saved.
53 xorl %eax, %eax # return UNW_ESUCCESS
56 #elif defined(__x86_64__)
59 # extern int __unw_getcontext(unw_context_t* thread_state)
62 # thread_state pointer is in rdi
# x86_64: save GPRs, rip, and the xmm register file into the context.
# PTR and TMP are macros (defined outside this excerpt) naming the context
# pointer and a scratch register — presumably rdi/rsi for SysV and rcx/rdx
# for Win64; confirm against the full file.
64 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
91 movq TMP,128(PTR) # store return address as rip
# movdqu (unaligned store) is used because the 16-byte xmm slots in
# unw_context_t are not guaranteed to be 16-byte aligned.
100 movdqu %xmm2,208(PTR)
101 movdqu %xmm3,224(PTR)
102 movdqu %xmm4,240(PTR)
103 movdqu %xmm5,256(PTR)
104 movdqu %xmm6,272(PTR)
105 movdqu %xmm7,288(PTR)
106 movdqu %xmm8,304(PTR)
107 movdqu %xmm9,320(PTR)
108 movdqu %xmm10,336(PTR)
109 movdqu %xmm11,352(PTR)
110 movdqu %xmm12,368(PTR)
111 movdqu %xmm13,384(PTR)
112 movdqu %xmm14,400(PTR)
113 movdqu %xmm15,416(PTR)
115 xorl %eax, %eax # return UNW_ESUCCESS
118 #elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
121 # extern int __unw_getcontext(unw_context_t* thread_state)
124 # thread_state pointer is in a0 ($4)
# MIPS O32: the integer register area occupies the first 4*36 bytes of the
# context (stores not visible in this excerpt); FP registers follow it at
# offset 4*36, each in an 8-byte slot.
126 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
162 # Store return address to pc
169 #ifdef __mips_hard_float
# First variant: only even-numbered FPRs are stored.  This matches the FR=0
# FP mode where 32-bit FPRs are paired and sdc1 of an even register writes
# the full 64-bit pair.  The preprocessor guard selecting between the two
# variants is not visible in this excerpt — confirm against the full file.
171 sdc1 $f0, (4 * 36 + 8 * 0)($4)
172 sdc1 $f2, (4 * 36 + 8 * 2)($4)
173 sdc1 $f4, (4 * 36 + 8 * 4)($4)
174 sdc1 $f6, (4 * 36 + 8 * 6)($4)
175 sdc1 $f8, (4 * 36 + 8 * 8)($4)
176 sdc1 $f10, (4 * 36 + 8 * 10)($4)
177 sdc1 $f12, (4 * 36 + 8 * 12)($4)
178 sdc1 $f14, (4 * 36 + 8 * 14)($4)
179 sdc1 $f16, (4 * 36 + 8 * 16)($4)
180 sdc1 $f18, (4 * 36 + 8 * 18)($4)
181 sdc1 $f20, (4 * 36 + 8 * 20)($4)
182 sdc1 $f22, (4 * 36 + 8 * 22)($4)
183 sdc1 $f24, (4 * 36 + 8 * 24)($4)
184 sdc1 $f26, (4 * 36 + 8 * 26)($4)
185 sdc1 $f28, (4 * 36 + 8 * 28)($4)
186 sdc1 $f30, (4 * 36 + 8 * 30)($4)
# Second variant: all 32 FPRs are stored individually (FR=1 mode, where each
# FPR is an independent 64-bit register).
188 sdc1 $f0, (4 * 36 + 8 * 0)($4)
189 sdc1 $f1, (4 * 36 + 8 * 1)($4)
190 sdc1 $f2, (4 * 36 + 8 * 2)($4)
191 sdc1 $f3, (4 * 36 + 8 * 3)($4)
192 sdc1 $f4, (4 * 36 + 8 * 4)($4)
193 sdc1 $f5, (4 * 36 + 8 * 5)($4)
194 sdc1 $f6, (4 * 36 + 8 * 6)($4)
195 sdc1 $f7, (4 * 36 + 8 * 7)($4)
196 sdc1 $f8, (4 * 36 + 8 * 8)($4)
197 sdc1 $f9, (4 * 36 + 8 * 9)($4)
198 sdc1 $f10, (4 * 36 + 8 * 10)($4)
199 sdc1 $f11, (4 * 36 + 8 * 11)($4)
200 sdc1 $f12, (4 * 36 + 8 * 12)($4)
201 sdc1 $f13, (4 * 36 + 8 * 13)($4)
202 sdc1 $f14, (4 * 36 + 8 * 14)($4)
203 sdc1 $f15, (4 * 36 + 8 * 15)($4)
204 sdc1 $f16, (4 * 36 + 8 * 16)($4)
205 sdc1 $f17, (4 * 36 + 8 * 17)($4)
206 sdc1 $f18, (4 * 36 + 8 * 18)($4)
207 sdc1 $f19, (4 * 36 + 8 * 19)($4)
208 sdc1 $f20, (4 * 36 + 8 * 20)($4)
209 sdc1 $f21, (4 * 36 + 8 * 21)($4)
210 sdc1 $f22, (4 * 36 + 8 * 22)($4)
211 sdc1 $f23, (4 * 36 + 8 * 23)($4)
212 sdc1 $f24, (4 * 36 + 8 * 24)($4)
213 sdc1 $f25, (4 * 36 + 8 * 25)($4)
214 sdc1 $f26, (4 * 36 + 8 * 26)($4)
215 sdc1 $f27, (4 * 36 + 8 * 27)($4)
216 sdc1 $f28, (4 * 36 + 8 * 28)($4)
217 sdc1 $f29, (4 * 36 + 8 * 29)($4)
218 sdc1 $f30, (4 * 36 + 8 * 30)($4)
219 sdc1 $f31, (4 * 36 + 8 * 31)($4)
223 # return UNW_ESUCCESS
227 #elif defined(__mips64)
230 # extern int __unw_getcontext(unw_context_t* thread_state)
233 # thread_state pointer is in a0 ($4)
# MIPS64: all context slots are 8 bytes wide; FP registers start at slot 35
# (offset 8*35), after the integer register area (stores not visible here).
235 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
271 # Store return address to pc
278 #ifdef __mips_hard_float
279 sdc1 $f0, (8 * 35)($4)
280 sdc1 $f1, (8 * 36)($4)
281 sdc1 $f2, (8 * 37)($4)
282 sdc1 $f3, (8 * 38)($4)
283 sdc1 $f4, (8 * 39)($4)
284 sdc1 $f5, (8 * 40)($4)
285 sdc1 $f6, (8 * 41)($4)
286 sdc1 $f7, (8 * 42)($4)
287 sdc1 $f8, (8 * 43)($4)
288 sdc1 $f9, (8 * 44)($4)
289 sdc1 $f10, (8 * 45)($4)
290 sdc1 $f11, (8 * 46)($4)
291 sdc1 $f12, (8 * 47)($4)
292 sdc1 $f13, (8 * 48)($4)
293 sdc1 $f14, (8 * 49)($4)
294 sdc1 $f15, (8 * 50)($4)
295 sdc1 $f16, (8 * 51)($4)
296 sdc1 $f17, (8 * 52)($4)
297 sdc1 $f18, (8 * 53)($4)
298 sdc1 $f19, (8 * 54)($4)
299 sdc1 $f20, (8 * 55)($4)
300 sdc1 $f21, (8 * 56)($4)
301 sdc1 $f22, (8 * 57)($4)
302 sdc1 $f23, (8 * 58)($4)
303 sdc1 $f24, (8 * 59)($4)
304 sdc1 $f25, (8 * 60)($4)
305 sdc1 $f26, (8 * 61)($4)
306 sdc1 $f27, (8 * 62)($4)
307 sdc1 $f28, (8 * 63)($4)
308 sdc1 $f29, (8 * 64)($4)
309 sdc1 $f30, (8 * 65)($4)
310 sdc1 $f31, (8 * 66)($4)
313 # return UNW_ESUCCESS
317 # elif defined(__mips__)
320 # extern int __unw_getcontext(unw_context_t* thread_state)
322 # Just trap for the time being.
# Fallback for MIPS ABIs not handled above (e.g. N32): unimplemented stub.
323 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
326 #elif defined(__powerpc64__)
329 // extern int __unw_getcontext(unw_context_t* thread_state)
332 // thread_state pointer is in r3
334 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
336 // store register (GPR)
// PPC64_STR(n): store GPR n into its 8-byte context slot; the +2 skips the
// two leading slots before the GPR array in unw_context_t.
// Do not insert anything between the next two lines — the backslash makes
// them one macro definition.
337 #define PPC64_STR(n) \
338 std %r##n, (8 * (n + 2))(%r3)
343 std %r0, PPC64_OFFS_SRR0(%r3) // store lr as srr0
// The mfspr/mfcr instructions that load %r0 before each of the following
// stores are not visible in this excerpt.
377 std %r0, PPC64_OFFS_CR(%r3)
379 std %r0, PPC64_OFFS_XER(%r3)
381 std %r0, PPC64_OFFS_LR(%r3)
383 std %r0, PPC64_OFFS_CTR(%r3)
385 std %r0, PPC64_OFFS_VRSAVE(%r3)
389 // (note that this also saves floating point registers and V registers,
390 // because part of VS is mapped to these registers)
// r4 walks through the FP/VSX area of the context.
392 addi %r4, %r3, PPC64_OFFS_FP
// PPC64_STVS(n): store VSX register n through the r4 cursor (stxvd2x takes
// a base+index address, hence the 0 index operand).  Single macro — keep
// the continuation lines together.
395 #define PPC64_STVS(n) \
396 stxvd2x %vs##n, 0, %r4 ;\
// PPC64_STF(n): store FP register n at its fixed 16-byte-strided offset.
467 #define PPC64_STF(n) \
468 stfd %f##n, (PPC64_OFFS_FP + n * 16)(%r3)
470 // save float registers
504 #if defined(__ALTIVEC__)
505 // save vector registers
507 // Use 16-bytes below the stack pointer as an
508 // aligned buffer to save each vector register.
509 // Note that the stack pointer is always 16-byte aligned.
// PPC64_STV_UNALIGNED(n): stvx requires a 16-byte-aligned address, so the
// vector is first stored to the aligned scratch buffer (r4), then copied
// into the (possibly unaligned) context in two 8-byte halves via r5.  The
// intermediate ld instructions are not visible in this excerpt.  Keep the
// continuation lines of this macro together.
512 #define PPC64_STV_UNALIGNED(n) \
513 stvx %v##n, 0, %r4 ;\
515 std %r5, (PPC64_OFFS_V + n * 16)(%r3) ;\
517 std %r5, (PPC64_OFFS_V + n * 16 + 8)(%r3)
519 PPC64_STV_UNALIGNED(0)
520 PPC64_STV_UNALIGNED(1)
521 PPC64_STV_UNALIGNED(2)
522 PPC64_STV_UNALIGNED(3)
523 PPC64_STV_UNALIGNED(4)
524 PPC64_STV_UNALIGNED(5)
525 PPC64_STV_UNALIGNED(6)
526 PPC64_STV_UNALIGNED(7)
527 PPC64_STV_UNALIGNED(8)
528 PPC64_STV_UNALIGNED(9)
529 PPC64_STV_UNALIGNED(10)
530 PPC64_STV_UNALIGNED(11)
531 PPC64_STV_UNALIGNED(12)
532 PPC64_STV_UNALIGNED(13)
533 PPC64_STV_UNALIGNED(14)
534 PPC64_STV_UNALIGNED(15)
535 PPC64_STV_UNALIGNED(16)
536 PPC64_STV_UNALIGNED(17)
537 PPC64_STV_UNALIGNED(18)
538 PPC64_STV_UNALIGNED(19)
539 PPC64_STV_UNALIGNED(20)
540 PPC64_STV_UNALIGNED(21)
541 PPC64_STV_UNALIGNED(22)
542 PPC64_STV_UNALIGNED(23)
543 PPC64_STV_UNALIGNED(24)
544 PPC64_STV_UNALIGNED(25)
545 PPC64_STV_UNALIGNED(26)
546 PPC64_STV_UNALIGNED(27)
547 PPC64_STV_UNALIGNED(28)
548 PPC64_STV_UNALIGNED(29)
549 PPC64_STV_UNALIGNED(30)
550 PPC64_STV_UNALIGNED(31)
555 li %r3, 0 // return UNW_ESUCCESS
559 #elif defined(__ppc__)
562 // extern int __unw_getcontext(unw_context_t* thread_state)
565 // thread_state pointer is in r3
567 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
570 stw %r0, 0(%r3) // store lr as srr0
603 // save VRSave register
613 #if !defined(__NO_FPRS__)
614 // save float registers
649 #if defined(__ALTIVEC__)
650 // save vector registers
// Clear the low 4 bits of r4 so it points at a 16-byte-aligned scratch slot
// (required by stvx); the instruction computing r4 from the stack pointer is
// not visible in this excerpt.
653 rlwinm %r4, %r4, 0, 0, 27 // mask low 4-bits
654 // r4 is now a 16-byte aligned pointer into the red zone
// SAVE_VECTOR_UNALIGNED: store the vector to the aligned scratch buffer,
// then copy it into the (possibly unaligned) context in four 4-byte words
// via r5.  SEPARATOR joins the statements; keep the continuation lines of
// this macro together.
656 #define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
657 stvx _vec, 0, %r4 SEPARATOR \
658 lwz %r5, 0(%r4) SEPARATOR \
659 stw %r5, _offset(%r3) SEPARATOR \
660 lwz %r5, 4(%r4) SEPARATOR \
661 stw %r5, _offset+4(%r3) SEPARATOR \
662 lwz %r5, 8(%r4) SEPARATOR \
663 stw %r5, _offset+8(%r3) SEPARATOR \
664 lwz %r5, 12(%r4) SEPARATOR \
665 stw %r5, _offset+12(%r3)
// 424 is the byte offset of the vector register area within the 32-bit
// unw_context_t; each vector occupies 16 bytes.
667 SAVE_VECTOR_UNALIGNED( %v0, 424+0x000)
668 SAVE_VECTOR_UNALIGNED( %v1, 424+0x010)
669 SAVE_VECTOR_UNALIGNED( %v2, 424+0x020)
670 SAVE_VECTOR_UNALIGNED( %v3, 424+0x030)
671 SAVE_VECTOR_UNALIGNED( %v4, 424+0x040)
672 SAVE_VECTOR_UNALIGNED( %v5, 424+0x050)
673 SAVE_VECTOR_UNALIGNED( %v6, 424+0x060)
674 SAVE_VECTOR_UNALIGNED( %v7, 424+0x070)
675 SAVE_VECTOR_UNALIGNED( %v8, 424+0x080)
676 SAVE_VECTOR_UNALIGNED( %v9, 424+0x090)
677 SAVE_VECTOR_UNALIGNED(%v10, 424+0x0A0)
678 SAVE_VECTOR_UNALIGNED(%v11, 424+0x0B0)
679 SAVE_VECTOR_UNALIGNED(%v12, 424+0x0C0)
680 SAVE_VECTOR_UNALIGNED(%v13, 424+0x0D0)
681 SAVE_VECTOR_UNALIGNED(%v14, 424+0x0E0)
682 SAVE_VECTOR_UNALIGNED(%v15, 424+0x0F0)
683 SAVE_VECTOR_UNALIGNED(%v16, 424+0x100)
684 SAVE_VECTOR_UNALIGNED(%v17, 424+0x110)
685 SAVE_VECTOR_UNALIGNED(%v18, 424+0x120)
686 SAVE_VECTOR_UNALIGNED(%v19, 424+0x130)
687 SAVE_VECTOR_UNALIGNED(%v20, 424+0x140)
688 SAVE_VECTOR_UNALIGNED(%v21, 424+0x150)
689 SAVE_VECTOR_UNALIGNED(%v22, 424+0x160)
690 SAVE_VECTOR_UNALIGNED(%v23, 424+0x170)
691 SAVE_VECTOR_UNALIGNED(%v24, 424+0x180)
692 SAVE_VECTOR_UNALIGNED(%v25, 424+0x190)
693 SAVE_VECTOR_UNALIGNED(%v26, 424+0x1A0)
694 SAVE_VECTOR_UNALIGNED(%v27, 424+0x1B0)
695 SAVE_VECTOR_UNALIGNED(%v28, 424+0x1C0)
696 SAVE_VECTOR_UNALIGNED(%v29, 424+0x1D0)
697 SAVE_VECTOR_UNALIGNED(%v30, 424+0x1E0)
698 SAVE_VECTOR_UNALIGNED(%v31, 424+0x1F0)
701 li %r3, 0 // return UNW_ESUCCESS
705 #elif defined(__aarch64__)
708 // extern int __unw_getcontext(unw_context_t* thread_state)
711 // thread_state pointer is in x0
// AArch64: x0 (the context pointer) is itself the first register stored, so
// it must be written before x0 is reused as the return value at the end.
// GPRs x0-x30 occupy offsets 0x000-0x0F0; pc at 0x100; d0-d31 (the 64-bit
// views of the SIMD/FP registers) follow from 0x110.
714 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
715 stp x0, x1, [x0, #0x000]
716 stp x2, x3, [x0, #0x010]
717 stp x4, x5, [x0, #0x020]
718 stp x6, x7, [x0, #0x030]
719 stp x8, x9, [x0, #0x040]
720 stp x10,x11, [x0, #0x050]
721 stp x12,x13, [x0, #0x060]
722 stp x14,x15, [x0, #0x070]
723 stp x16,x17, [x0, #0x080]
724 stp x18,x19, [x0, #0x090]
725 stp x20,x21, [x0, #0x0A0]
726 stp x22,x23, [x0, #0x0B0]
727 stp x24,x25, [x0, #0x0C0]
728 stp x26,x27, [x0, #0x0D0]
729 stp x28,x29, [x0, #0x0E0]
730 str x30, [x0, #0x0F0]
// x30 is the link register, i.e. the address this function will return to.
733 str x30, [x0, #0x100] // store return address as pc
735 stp d0, d1, [x0, #0x110]
736 stp d2, d3, [x0, #0x120]
737 stp d4, d5, [x0, #0x130]
738 stp d6, d7, [x0, #0x140]
739 stp d8, d9, [x0, #0x150]
740 stp d10,d11, [x0, #0x160]
741 stp d12,d13, [x0, #0x170]
742 stp d14,d15, [x0, #0x180]
743 stp d16,d17, [x0, #0x190]
744 stp d18,d19, [x0, #0x1A0]
745 stp d20,d21, [x0, #0x1B0]
746 stp d22,d23, [x0, #0x1C0]
747 stp d24,d25, [x0, #0x1D0]
748 stp d26,d27, [x0, #0x1E0]
749 stp d28,d29, [x0, #0x1F0]
750 str d30, [x0, #0x200]
751 str d31, [x0, #0x208]
752 mov x0, #0 // return UNW_ESUCCESS
755 #elif defined(__arm__) && !defined(__APPLE__)
757 #if !defined(__ARM_ARCH_ISA_ARM)
758 #if (__ARM_ARCH_ISA_THUMB == 2)
765 @ extern int __unw_getcontext(unw_context_t* thread_state)
768 @ thread_state pointer is in r0
770 @ Per EHABI #4.7 this only saves the core integer registers.
771 @ EHABI #7.4.5 notes that in general all VRS registers should be restored
772 @ however this is very hard to do for VFP registers because it is unknown
773 @ to the library how many registers are implemented by the architecture.
774 @ Instead, VFP registers are demand saved by logic external to __unw_getcontext.
777 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
@ Thumb-1 path: str has limited register/offset encodings, so high registers
@ are first copied down to low scratch registers (moves not visible in this
@ excerpt) before being stored via r1-r3.
778 #if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
787 str r1, [r0, #0] @ r11
788 @ r12 does not need storing, it is the intra-procedure-call scratch register
789 str r2, [r0, #8] @ sp
790 str r3, [r0, #12] @ lr
791 str r3, [r0, #16] @ store return address as pc
792 @ T1 does not have a non-cpsr-clobbering register-zeroing instruction.
793 @ It is safe to use here though because we are about to return, and cpsr is
794 @ not expected to be preserved.
795 movs r0, #0 @ return UNW_ESUCCESS
797 @ 32bit thumb-2 restrictions for stm:
798 @ . the sp (r13) cannot be in the list
799 @ . the pc (r15) cannot be in the list in an STM instruction
803 str lr, [r0, #60] @ store return address as pc
804 mov r0, #0 @ return UNW_ESUCCESS
809 @ static void libunwind::Registers_arm::saveVFPWithFSTMD(unw_fpreg_t* values)
812 @ values pointer is in r0
@ Saves d0-d15 using the FSTMD (standard, "D" layout) store-multiple format.
818 DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPv)
823 @ static void libunwind::Registers_arm::saveVFPWithFSTMX(unw_fpreg_t* values)
826 @ values pointer is in r0
832 DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPv)
833 vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia
837 @ static void libunwind::Registers_arm::saveVFPv3(unw_fpreg_t* values)
840 @ values pointer is in r0
@ Saves the upper VFPv3 registers (d16-d31); the store instructions are not
@ visible in this excerpt.
846 DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPv)
847 @ VFP and iwMMX instructions are only available when compiling with the flags
848 @ that enable them. We do not want to do that in the library (because we do not
849 @ want the compiler to generate instructions that access those) but this is
850 @ only accessed if the personality routine needs these registers. Use of
851 @ these registers implies they are, actually, available on the target, so
852 @ it's ok to execute.
853 @ So, generate the instructions using the corresponding coprocessor mnemonic.
857 #if defined(_LIBUNWIND_ARM_WMMX)
860 @ static void libunwind::Registers_arm::saveiWMMX(unw_fpreg_t* values)
863 @ values pointer is in r0
@ Each stcl stores one 64-bit wMMX data register and post-increments r0 by 8.
@ Coprocessor p1 encodings are used so the file assembles without enabling
@ iwMMXt in the assembler (see the note in saveVFPv3 above the same trick).
869 DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPv)
870 stcl p1, cr0, [r0], #8 @ wstrd wR0, [r0], #8
871 stcl p1, cr1, [r0], #8 @ wstrd wR1, [r0], #8
872 stcl p1, cr2, [r0], #8 @ wstrd wR2, [r0], #8
873 stcl p1, cr3, [r0], #8 @ wstrd wR3, [r0], #8
874 stcl p1, cr4, [r0], #8 @ wstrd wR4, [r0], #8
875 stcl p1, cr5, [r0], #8 @ wstrd wR5, [r0], #8
876 stcl p1, cr6, [r0], #8 @ wstrd wR6, [r0], #8
877 stcl p1, cr7, [r0], #8 @ wstrd wR7, [r0], #8
878 stcl p1, cr8, [r0], #8 @ wstrd wR8, [r0], #8
879 stcl p1, cr9, [r0], #8 @ wstrd wR9, [r0], #8
880 stcl p1, cr10, [r0], #8 @ wstrd wR10, [r0], #8
881 stcl p1, cr11, [r0], #8 @ wstrd wR11, [r0], #8
882 stcl p1, cr12, [r0], #8 @ wstrd wR12, [r0], #8
883 stcl p1, cr13, [r0], #8 @ wstrd wR13, [r0], #8
884 stcl p1, cr14, [r0], #8 @ wstrd wR14, [r0], #8
885 stcl p1, cr15, [r0], #8 @ wstrd wR15, [r0], #8
889 @ static void libunwind::Registers_arm::saveiWMMXControl(unw_uint32_t* values)
892 @ values pointer is in r0
@ Stores the four 32-bit wMMX general control registers (wCGR0-wCGR3),
@ post-incrementing r0 by 4 each time; stc2 encodings avoid needing iwMMXt
@ assembler support.
898 DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveiWMMXControlEPj)
899 stc2 p1, cr8, [r0], #4 @ wstrw wCGR0, [r0], #4
900 stc2 p1, cr9, [r0], #4 @ wstrw wCGR1, [r0], #4
901 stc2 p1, cr10, [r0], #4 @ wstrw wCGR2, [r0], #4
902 stc2 p1, cr11, [r0], #4 @ wstrw wCGR3, [r0], #4
907 #elif defined(__or1k__)
910 # extern int __unw_getcontext(unw_context_t* thread_state)
913 # thread_state pointer is in r3
# OpenRISC 1000: register stores are not visible in this excerpt.
915 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
953 #elif defined(__hexagon__)
955 # extern int __unw_getcontext(unw_context_t* thread_state)
958 # thread_state pointer is in r0
// OFFSET converts a byte offset into a word index (4-byte slots).
960 #define OFFSET(offset) (offset/4)
961 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
991 r1 = c4 // Predicate register
993 r1 = memw(r30) // *FP == Saved FP
999 #elif defined(__sparc__)
1002 # extern int __unw_getcontext(unw_context_t* thread_state)
1005 # thread_state pointer is in o0
# SPARC: std stores an even/odd register pair (8 bytes) per instruction;
# the stores for the earlier register windows are not visible in this excerpt.
1007 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
1023 std %i2, [%o0 + 104]
1024 std %i4, [%o0 + 112]
1025 std %i6, [%o0 + 120]
1027 clr %o0 # return UNW_ESUCCESS
1029 #elif defined(__riscv) && __riscv_xlen == 64
1032 # extern int __unw_getcontext(unw_context_t* thread_state)
1035 # thread_state pointer is in a0
# RV64: integer registers occupy slots 0-31 (8 bytes each); slot 0 holds the
# pc, for which the return address (ra/x1) is used.  FP registers, when
# __riscv_flen == 64, follow at slot 32.
1037 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
1038 sd x1, (8 * 0)(a0) // store ra as pc
1048 sd x10, (8 * 10)(a0)
1049 sd x11, (8 * 11)(a0)
1050 sd x12, (8 * 12)(a0)
1051 sd x13, (8 * 13)(a0)
1052 sd x14, (8 * 14)(a0)
1053 sd x15, (8 * 15)(a0)
1054 sd x16, (8 * 16)(a0)
1055 sd x17, (8 * 17)(a0)
1056 sd x18, (8 * 18)(a0)
1057 sd x19, (8 * 19)(a0)
1058 sd x20, (8 * 20)(a0)
1059 sd x21, (8 * 21)(a0)
1060 sd x22, (8 * 22)(a0)
1061 sd x23, (8 * 23)(a0)
1062 sd x24, (8 * 24)(a0)
1063 sd x25, (8 * 25)(a0)
1064 sd x26, (8 * 26)(a0)
1065 sd x27, (8 * 27)(a0)
1066 sd x28, (8 * 28)(a0)
1067 sd x29, (8 * 29)(a0)
1068 sd x30, (8 * 30)(a0)
1069 sd x31, (8 * 31)(a0)
1071 #if defined(__riscv_flen) && __riscv_flen == 64
1072 fsd f0, (8 * 32 + 8 * 0)(a0)
1073 fsd f1, (8 * 32 + 8 * 1)(a0)
1074 fsd f2, (8 * 32 + 8 * 2)(a0)
1075 fsd f3, (8 * 32 + 8 * 3)(a0)
1076 fsd f4, (8 * 32 + 8 * 4)(a0)
1077 fsd f5, (8 * 32 + 8 * 5)(a0)
1078 fsd f6, (8 * 32 + 8 * 6)(a0)
1079 fsd f7, (8 * 32 + 8 * 7)(a0)
1080 fsd f8, (8 * 32 + 8 * 8)(a0)
1081 fsd f9, (8 * 32 + 8 * 9)(a0)
1082 fsd f10, (8 * 32 + 8 * 10)(a0)
1083 fsd f11, (8 * 32 + 8 * 11)(a0)
1084 fsd f12, (8 * 32 + 8 * 12)(a0)
1085 fsd f13, (8 * 32 + 8 * 13)(a0)
1086 fsd f14, (8 * 32 + 8 * 14)(a0)
1087 fsd f15, (8 * 32 + 8 * 15)(a0)
1088 fsd f16, (8 * 32 + 8 * 16)(a0)
1089 fsd f17, (8 * 32 + 8 * 17)(a0)
1090 fsd f18, (8 * 32 + 8 * 18)(a0)
1091 fsd f19, (8 * 32 + 8 * 19)(a0)
1092 fsd f20, (8 * 32 + 8 * 20)(a0)
1093 fsd f21, (8 * 32 + 8 * 21)(a0)
1094 fsd f22, (8 * 32 + 8 * 22)(a0)
1095 fsd f23, (8 * 32 + 8 * 23)(a0)
1096 fsd f24, (8 * 32 + 8 * 24)(a0)
1097 fsd f25, (8 * 32 + 8 * 25)(a0)
1098 fsd f26, (8 * 32 + 8 * 26)(a0)
1099 fsd f27, (8 * 32 + 8 * 27)(a0)
1100 fsd f28, (8 * 32 + 8 * 28)(a0)
1101 fsd f29, (8 * 32 + 8 * 29)(a0)
1102 fsd f30, (8 * 32 + 8 * 30)(a0)
1103 fsd f31, (8 * 32 + 8 * 31)(a0)
1106 li a0, 0 // return UNW_ESUCCESS
// Export unw_getcontext as a weak alias of __unw_getcontext for all targets.
1110 WEAK_ALIAS(__unw_getcontext, unw_getcontext)
1112 #endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */
// Mark the object as not needing an executable stack (ELF GNU-stack note).
1114 NO_EXEC_STACK_DIRECTIVE