2 * arch/sh/kernel/cpu/sh5/entry.S
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 * Copyright (C) 2004 - 2007 Paul Mundt
6 * Copyright (C) 2003, 2004 Richard Curnow
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
12 #include <linux/errno.h>
13 #include <linux/sys.h>
14 #include <asm/cpu/registers.h>
15 #include <asm/processor.h>
16 #include <asm/unistd.h>
17 #include <asm/thread_info.h>
18 #include <asm/asm-offsets.h>
23 #define SR_ASID_MASK 0x00ff0000
24 #define SR_FD_MASK 0x00008000
25 #define SR_SS 0x08000000
26 #define SR_BL 0x10000000
27 #define SR_MD 0x40000000
32 #define EVENT_INTERRUPT 0
33 #define EVENT_FAULT_TLB 1
34 #define EVENT_FAULT_NOT_TLB 2
38 #define RESET_CAUSE 0x20
39 #define DEBUGSS_CAUSE 0x980
42 * Frame layout. Quad index.
44 #define FRAME_T(x) FRAME_TBASE+(x*8)
45 #define FRAME_R(x) FRAME_RBASE+(x*8)
46 #define FRAME_S(x) FRAME_SBASE+(x*8)
51 /* Arrange the save frame to be a multiple of 32 bytes long */
53 #define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
54 #define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
55 #define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
56 #define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
58 #define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
59 #define FP_FRAME_BASE 0
69 /* These are the registers saved in the TLB path that aren't saved in the first
70 level of the normal one. */
71 #define TLB_SAVED_R25 7*8
72 #define TLB_SAVED_TR1 8*8
73 #define TLB_SAVED_TR2 9*8
74 #define TLB_SAVED_TR3 10*8
75 #define TLB_SAVED_TR4 11*8
76 /* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1 causing
77 breakage otherwise. */
78 #define TLB_SAVED_R0 12*8
79 #define TLB_SAVED_R1 13*8
92 # define preempt_stop() CLI()
94 # define preempt_stop()
95 # define resume_kernel restore_all
100 #define FAST_TLBMISS_STACK_CACHELINES 4
101 #define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
103 /* Register back-up area for all exceptions */
105 /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
106 * register saves etc. */
107 .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
108 /* This is 32 byte aligned by construction */
109 /* Register back-up area for all exceptions */
129 /* Save area for RESVEC exceptions. We cannot use reg_save_area because of
130 * reentrancy. Note this area may be accessed via physical address.
131 * Align so this fits a whole single cache line, for ease of purging.
142 /* Jump table of 3rd level handlers */
144 .long do_exception_error /* 0x000 */
145 .long do_exception_error /* 0x020 */
146 .long tlb_miss_load /* 0x040 */
147 .long tlb_miss_store /* 0x060 */
148 ! ARTIFICIAL pseudo-EXPEVT setting
149 .long do_debug_interrupt /* 0x080 */
150 .long tlb_miss_load /* 0x0A0 */
151 .long tlb_miss_store /* 0x0C0 */
152 .long do_address_error_load /* 0x0E0 */
153 .long do_address_error_store /* 0x100 */
155 .long do_fpu_error /* 0x120 */
157 .long do_exception_error /* 0x120 */
159 .long do_exception_error /* 0x140 */
160 .long system_call /* 0x160 */
161 .long do_reserved_inst /* 0x180 */
162 .long do_illegal_slot_inst /* 0x1A0 */
163 .long do_exception_error /* 0x1C0 - NMI */
164 .long do_exception_error /* 0x1E0 */
166 .long do_IRQ /* 0x200 - 0x3C0 */
168 .long do_exception_error /* 0x3E0 */
170 .long do_IRQ /* 0x400 - 0x7E0 */
172 .long fpu_error_or_IRQA /* 0x800 */
173 .long fpu_error_or_IRQB /* 0x820 */
174 .long do_IRQ /* 0x840 */
175 .long do_IRQ /* 0x860 */
177 .long do_exception_error /* 0x880 - 0x920 */
179 .long do_software_break_point /* 0x940 */
180 .long do_exception_error /* 0x960 */
181 .long do_single_step /* 0x980 */
184 .long do_exception_error /* 0x9A0 - 0x9E0 */
186 .long do_IRQ /* 0xA00 */
187 .long do_IRQ /* 0xA20 */
188 .long itlb_miss_or_IRQ /* 0xA40 */
189 .long do_IRQ /* 0xA60 */
190 .long do_IRQ /* 0xA80 */
191 .long itlb_miss_or_IRQ /* 0xAA0 */
192 .long do_exception_error /* 0xAC0 */
193 .long do_address_error_exec /* 0xAE0 */
195 .long do_exception_error /* 0xB00 - 0xBE0 */
198 .long do_IRQ /* 0xC00 - 0xE20 */
201 .section .text64, "ax"
204 * --- Exception/Interrupt/Event Handling Section
208 * VBR and RESVEC blocks.
210 * First level handler for VBR-based exceptions.
212 * To avoid waste of space, align to the maximum text block size.
213 * This is assumed to be at most 128 bytes or 32 instructions.
214 * DO NOT EXCEED 32 instructions on the first level handlers !
216 * Also note that RESVEC is contained within the VBR block
217 * where the room left (1KB - TEXT_SIZE) allows placing
218 * the RESVEC block (at most 512B + TEXT_SIZE).
220 * So first (and only) level handler for RESVEC-based exceptions.
222 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
223 * and interrupt) we are very tight on register space until
224 * saving onto the stack frame, which is done in handle_exception().
228 #define TEXT_SIZE 128
229 #define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
233 .space 256, 0 /* Power-on class handler, */
234 /* not required here */
236 synco /* TAKum03020 (but probably a good idea anyway.) */
237 /* Save original stack pointer into KCR1 */
240 /* Save other original registers into reg_save_area */
241 movi reg_save_area, SP
242 st.q SP, SAVED_R2, r2
243 st.q SP, SAVED_R3, r3
244 st.q SP, SAVED_R4, r4
245 st.q SP, SAVED_R5, r5
246 st.q SP, SAVED_R6, r6
247 st.q SP, SAVED_R18, r18
249 st.q SP, SAVED_TR0, r3
251 /* Set args for Non-debug, Not a TLB miss class handler */
253 movi ret_from_exception, r3
255 movi EVENT_FAULT_NOT_TLB, r4
258 pta handle_exception, tr0
269 * Instead of the natural .balign 1024 place RESVEC here
270 * respecting the final 1KB alignment.
274 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
275 * block making sure the final alignment is correct.
278 synco /* TAKum03020 (but probably a good idea anyway.) */
280 movi reg_save_area, SP
281 /* SP is guaranteed 32-byte aligned. */
282 st.q SP, TLB_SAVED_R0 , r0
283 st.q SP, TLB_SAVED_R1 , r1
284 st.q SP, SAVED_R2 , r2
285 st.q SP, SAVED_R3 , r3
286 st.q SP, SAVED_R4 , r4
287 st.q SP, SAVED_R5 , r5
288 st.q SP, SAVED_R6 , r6
289 st.q SP, SAVED_R18, r18
291 /* Save R25 for safety; as/ld may want to use it to achieve the call to
292 * the code in mm/tlbmiss.c */
293 st.q SP, TLB_SAVED_R25, r25
299 st.q SP, SAVED_TR0 , r2
300 st.q SP, TLB_SAVED_TR1 , r3
301 st.q SP, TLB_SAVED_TR2 , r4
302 st.q SP, TLB_SAVED_TR3 , r5
303 st.q SP, TLB_SAVED_TR4 , r18
305 pt do_fast_page_fault, tr0
310 andi r2, 1, r2 /* r2 = SSR.MD */
313 pt fixup_to_invoke_general_handler, tr1
315 /* If the fast path handler fixed the fault, just drop through quickly
316 to the restore code right away to return to the excepting context.
320 fast_tlb_miss_restore:
/* Fast-path exit: do_fast_page_fault fixed the fault, so undo the minimal
 * register save performed at RESVEC entry and resume the faulting context
 * directly, without ever entering the generic handle_exception machinery.
 * NOTE(review): r2-r5/r18 are first reloaded with the saved target-register
 * images (the ptabs moves back into tr0-tr4 are on lines not visible in
 * this view), and only then reloaded with their own saved GPR contents. */
321 ld.q SP, SAVED_TR0, r2
322 ld.q SP, TLB_SAVED_TR1, r3
323 ld.q SP, TLB_SAVED_TR2, r4
325 ld.q SP, TLB_SAVED_TR3, r5
326 ld.q SP, TLB_SAVED_TR4, r18
/* Now restore the general purpose registers that the TLB path saved. */
334 ld.q SP, TLB_SAVED_R0, r0
335 ld.q SP, TLB_SAVED_R1, r1
336 ld.q SP, SAVED_R2, r2
337 ld.q SP, SAVED_R3, r3
338 ld.q SP, SAVED_R4, r4
339 ld.q SP, SAVED_R5, r5
340 ld.q SP, SAVED_R6, r6
341 ld.q SP, SAVED_R18, r18
342 ld.q SP, TLB_SAVED_R25, r25
346 nop /* for safety, in case the code is run on sh5-101 cut1.x */
348 fixup_to_invoke_general_handler:
/* Slow path: the fast TLB handler could not resolve the fault.  Rebuild
 * the 'first-level' save state so it matches what handle_exception
 * expects, then hand control to the generic second-level handler. */
350 /* OK, new method. Restore stuff that's not expected to get saved into
351 the 'first-level' reg save area, then just fall through to setting
352 up the registers and calling the second-level handler. */
354 /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
355 r25,tr1-4 and save r6 to get into the right state. */
357 ld.q SP, TLB_SAVED_TR1, r3
358 ld.q SP, TLB_SAVED_TR2, r4
359 ld.q SP, TLB_SAVED_TR3, r5
360 ld.q SP, TLB_SAVED_TR4, r18
361 ld.q SP, TLB_SAVED_R25, r25
363 ld.q SP, TLB_SAVED_R0, r0
364 ld.q SP, TLB_SAVED_R1, r1
371 /* Set args for Non-debug, TLB miss class handler */
373 movi ret_from_exception, r3 /* r3 = return address for the 2nd level */
375 movi EVENT_FAULT_TLB, r4 /* r4 = event class: TLB miss fault */
378 pta handle_exception, tr0
381 /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
382 DOES END UP AT VBR+0x600 */
394 synco /* TAKum03020 (but probably a good idea anyway.) */
395 /* Save original stack pointer into KCR1 */
398 /* Save other original registers into reg_save_area */
399 movi reg_save_area, SP
400 st.q SP, SAVED_R2, r2
401 st.q SP, SAVED_R3, r3
402 st.q SP, SAVED_R4, r4
403 st.q SP, SAVED_R5, r5
404 st.q SP, SAVED_R6, r6
405 st.q SP, SAVED_R18, r18
407 st.q SP, SAVED_TR0, r3
409 /* Set args for interrupt class handler */
411 movi ret_from_irq, r3
413 movi EVENT_INTERRUPT, r4
416 pta handle_exception, tr0
418 .balign TEXT_SIZE /* let's waste the bare minimum */
420 LVBR_block_end: /* Marker. Used for total checking */
424 /* Panic handler. Called with MMU off. Possible causes/actions:
425 * - Reset: Jump to program start.
426 * - Single Step: Turn off Single Step & return.
427 * - Others: Call panic handler, passing PC as arg.
428 * (this may need to be extended...)
431 synco /* TAKum03020 (but probably a good idea anyway.) */
433 /* First save r0-1 and tr0, as we need to use these */
434 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
443 sub r1, r0, r1 /* r1=0 if reset */
444 movi _stext-CONFIG_PAGE_OFFSET, r0
447 beqi r1, 0, tr0 /* Jump to start address if reset */
450 movi DEBUGSS_CAUSE, r1
451 sub r1, r0, r1 /* r1=0 if single step */
452 pta single_step_panic, tr0
453 beqi r1, 0, tr0 /* jump if single step */
455 /* Now jump to where we save the registers. */
456 movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
461 /* We are in a handler with Single Step set. We need to resume the
462 * handler, by turning on MMU & turning off Single Step. */
469 /* Restore EXPEVT, as the rte won't do this */
484 synco /* TAKum03020 (but probably a good idea anyway.) */
486 * Single step/software_break_point first level handler.
487 * Called with MMU off, so the first thing we do is enable it
488 * by doing an rte with appropriate SSR.
491 /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
492 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
494 /* With the MMU off, we are bypassing the cache, so purge any
495 * data that will be made stale by the following stores.
507 /* Enable MMU, block exceptions, set priv mode, disable single step */
508 movi SR_MMU | SR_BL | SR_MD, r1
513 /* Force control to debug_exception_2 when rte is executed */
514 movi debug_exeception_2, r0
515 ori r0, 1, r0 /* force SHmedia, just in case */
521 /* Restore saved regs */
523 movi resvec_save_area, SP
531 /* Save other original registers into reg_save_area */
532 movi reg_save_area, SP
533 st.q SP, SAVED_R2, r2
534 st.q SP, SAVED_R3, r3
535 st.q SP, SAVED_R4, r4
536 st.q SP, SAVED_R5, r5
537 st.q SP, SAVED_R6, r6
538 st.q SP, SAVED_R18, r18
540 st.q SP, SAVED_TR0, r3
542 /* Set args for debug class handler */
544 movi ret_from_exception, r3
549 pta handle_exception, tr0
554 /* !!! WE COME HERE IN REAL MODE !!! */
555 /* Hook-up debug interrupt to allow various debugging options to be
556 * hooked into its handler. */
557 /* Save original stack pointer into KCR1 */
560 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
565 /* Save other original registers into reg_save_area thru real addresses */
566 st.q SP, SAVED_R2, r2
567 st.q SP, SAVED_R3, r3
568 st.q SP, SAVED_R4, r4
569 st.q SP, SAVED_R5, r5
570 st.q SP, SAVED_R6, r6
571 st.q SP, SAVED_R18, r18
573 st.q SP, SAVED_TR0, r3
575 /* move (spc,ssr)->(pspc,pssr). The rte will shift
576 them back again, so that they look like the originals
577 as far as the real handler code is concerned. */
583 ! construct useful SR for handle_exception
590 ! SSR is now the current SR with the MD and MMU bits set
591 ! i.e. the rte will switch back to priv mode and put
595 movi handle_exception, r18
596 ori r18, 1, r18 ! for safety (do we need this?)
599 /* Set args for Non-debug, Not a TLB miss class handler */
601 ! EXPEVT==0x80 is unused, so 'steal' this value to put the
602 ! debug interrupt handler in the vectoring table
604 movi ret_from_exception, r3
606 movi EVENT_FAULT_NOT_TLB, r4
609 movi CONFIG_PAGE_OFFSET, r6
614 rte ! -> handle_exception, switch back to priv mode again
616 LRESVEC_block_end: /* Marker. Unused. */
621 * Second level handler for VBR-based exceptions. Pre-handler.
622 * In common to all stack-frame sensitive handlers.
625 * (KCR0) Current [current task union]
628 * (r3) appropriate return address
629 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
630 * (r5) Pointer to reg_save_area
633 * Available registers:
640 /* Common 2nd level handler. */
642 /* First thing we need an appropriate stack pointer */
647 bne r6, ZERO, tr0 /* Original stack pointer is fine */
649 /* Set stack pointer for user fault */
651 movi THREAD_SIZE, r6 /* Point to the end */
656 /* DEBUG : check for underflow/overflow of the kernel stack */
657 pta no_underflow, tr0
661 bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
663 /* Just panic to cause a crash. */
671 movi THREAD_SIZE, r18
673 bgt SP, r6, tr0 ! sp above the stack
675 /* Make some room for the BASIC frame. */
676 movi -(FRAME_SIZE), r6
679 /* Could do this with no stalling if we had another spare register, but the
680 code below will be OK. */
681 ld.q r5, SAVED_R2, r6
682 ld.q r5, SAVED_R3, r18
683 st.q SP, FRAME_R(2), r6
684 ld.q r5, SAVED_R4, r6
685 st.q SP, FRAME_R(3), r18
686 ld.q r5, SAVED_R5, r18
687 st.q SP, FRAME_R(4), r6
688 ld.q r5, SAVED_R6, r6
689 st.q SP, FRAME_R(5), r18
690 ld.q r5, SAVED_R18, r18
691 st.q SP, FRAME_R(6), r6
692 ld.q r5, SAVED_TR0, r6
693 st.q SP, FRAME_R(18), r18
694 st.q SP, FRAME_T(0), r6
696 /* Keep old SP around */
699 /* Save the rest of the general purpose registers */
700 st.q SP, FRAME_R(0), r0
701 st.q SP, FRAME_R(1), r1
702 st.q SP, FRAME_R(7), r7
703 st.q SP, FRAME_R(8), r8
704 st.q SP, FRAME_R(9), r9
705 st.q SP, FRAME_R(10), r10
706 st.q SP, FRAME_R(11), r11
707 st.q SP, FRAME_R(12), r12
708 st.q SP, FRAME_R(13), r13
709 st.q SP, FRAME_R(14), r14
711 /* SP is somewhere else */
712 st.q SP, FRAME_R(15), r6
714 st.q SP, FRAME_R(16), r16
715 st.q SP, FRAME_R(17), r17
716 /* r18 is saved earlier. */
717 st.q SP, FRAME_R(19), r19
718 st.q SP, FRAME_R(20), r20
719 st.q SP, FRAME_R(21), r21
720 st.q SP, FRAME_R(22), r22
721 st.q SP, FRAME_R(23), r23
722 st.q SP, FRAME_R(24), r24
723 st.q SP, FRAME_R(25), r25
724 st.q SP, FRAME_R(26), r26
725 st.q SP, FRAME_R(27), r27
726 st.q SP, FRAME_R(28), r28
727 st.q SP, FRAME_R(29), r29
728 st.q SP, FRAME_R(30), r30
729 st.q SP, FRAME_R(31), r31
730 st.q SP, FRAME_R(32), r32
731 st.q SP, FRAME_R(33), r33
732 st.q SP, FRAME_R(34), r34
733 st.q SP, FRAME_R(35), r35
734 st.q SP, FRAME_R(36), r36
735 st.q SP, FRAME_R(37), r37
736 st.q SP, FRAME_R(38), r38
737 st.q SP, FRAME_R(39), r39
738 st.q SP, FRAME_R(40), r40
739 st.q SP, FRAME_R(41), r41
740 st.q SP, FRAME_R(42), r42
741 st.q SP, FRAME_R(43), r43
742 st.q SP, FRAME_R(44), r44
743 st.q SP, FRAME_R(45), r45
744 st.q SP, FRAME_R(46), r46
745 st.q SP, FRAME_R(47), r47
746 st.q SP, FRAME_R(48), r48
747 st.q SP, FRAME_R(49), r49
748 st.q SP, FRAME_R(50), r50
749 st.q SP, FRAME_R(51), r51
750 st.q SP, FRAME_R(52), r52
751 st.q SP, FRAME_R(53), r53
752 st.q SP, FRAME_R(54), r54
753 st.q SP, FRAME_R(55), r55
754 st.q SP, FRAME_R(56), r56
755 st.q SP, FRAME_R(57), r57
756 st.q SP, FRAME_R(58), r58
757 st.q SP, FRAME_R(59), r59
758 st.q SP, FRAME_R(60), r60
759 st.q SP, FRAME_R(61), r61
760 st.q SP, FRAME_R(62), r62
763 * Save the S* registers.
766 st.q SP, FRAME_S(FSSR), r61
768 st.q SP, FRAME_S(FSPC), r62
769 movi -1, r62 /* Reset syscall_nr */
770 st.q SP, FRAME_S(FSYSCALL_ID), r62
772 /* Save the rest of the target registers */
774 st.q SP, FRAME_T(1), r6
776 st.q SP, FRAME_T(2), r6
778 st.q SP, FRAME_T(3), r6
780 st.q SP, FRAME_T(4), r6
782 st.q SP, FRAME_T(5), r6
784 st.q SP, FRAME_T(6), r6
786 st.q SP, FRAME_T(7), r6
788 ! setup FP so that unwinder can wind back through nested kernel mode
792 #ifdef CONFIG_POOR_MANS_STRACE
793 /* We've pushed all the registers now, so only r2-r4 hold anything
794 * useful. Move them into callee save registers */
799 /* Preserve r2 as the event code */
813 /* For syscall and debug race condition, get TRA now */
816 /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
817 * Also set FD, to catch FPU usage in the kernel.
819 * benedict.gaster@superh.com 29/07/2002
821 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
822 * same time change BL from 1->0, as any pending interrupt of a level
823 * higher than the previous value of IMASK will leak through and be
824 * taken unexpectedly.
826 * To avoid this we raise the IMASK and then issue another PUTCON to
830 movi SR_IMASK | SR_FD, r7
833 movi SR_UNBLOCK_EXC, r7
838 /* Now call the appropriate 3rd level handler */
849 * Second level handler for VBR-based exceptions. Post-handlers.
851 * Post-handlers for interrupts (ret_from_irq), exceptions
852 * (ret_from_exception) and common reentrance doors (restore_all
853 * to get back to the original context, ret_from_syscall loop to
854 * check kernel exiting).
856 * ret_with_reschedule and work_notifysig are inner labels of
857 * the ret_from_syscall loop.
859 * In common to all stack-frame sensitive handlers.
862 * (SP) struct pt_regs *, original register's frame pointer (basic)
867 #ifdef CONFIG_POOR_MANS_STRACE
868 pta evt_debug_ret_from_irq, tr0
872 ld.q SP, FRAME_S(FSSR), r6
875 pta resume_kernel, tr0
876 bne r6, ZERO, tr0 /* no further checks */
878 pta ret_with_reschedule, tr0
879 blink tr0, ZERO /* Do not check softirqs */
881 .global ret_from_exception
885 #ifdef CONFIG_POOR_MANS_STRACE
886 pta evt_debug_ret_from_exc, tr0
891 ld.q SP, FRAME_S(FSSR), r6
894 pta resume_kernel, tr0
895 bne r6, ZERO, tr0 /* no further checks */
899 #ifdef CONFIG_PREEMPT
900 pta ret_from_syscall, tr0
907 ld.l r6, TI_PRE_COUNT, r7
911 ld.l r6, TI_FLAGS, r7
912 movi (1 << TIF_NEED_RESCHED), r8
920 movi ((PREEMPT_ACTIVE >> 16) & 65535), r8
921 shori (PREEMPT_ACTIVE & 65535), r8
922 st.l r6, TI_PRE_COUNT, r8
930 st.l r6, TI_PRE_COUNT, ZERO
933 pta need_resched, tr1
937 .global ret_from_syscall
941 getcon KCR0, r6 ! r6 contains current_thread_info
942 ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
944 movi _TIF_NEED_RESCHED, r8
946 pta work_resched, tr0
951 movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
953 pta work_notifysig, tr0
959 pta ret_from_syscall, tr0
963 blink tr0, ZERO /* Call schedule(), return on top */
972 blink tr0, LINK /* Call do_signal(regs, 0), return here */
977 ld.q SP, FRAME_T(0), r6
978 ld.q SP, FRAME_T(1), r7
979 ld.q SP, FRAME_T(2), r8
980 ld.q SP, FRAME_T(3), r9
985 ld.q SP, FRAME_T(4), r6
986 ld.q SP, FRAME_T(5), r7
987 ld.q SP, FRAME_T(6), r8
988 ld.q SP, FRAME_T(7), r9
994 ld.q SP, FRAME_R(0), r0
995 ld.q SP, FRAME_R(1), r1
996 ld.q SP, FRAME_R(2), r2
997 ld.q SP, FRAME_R(3), r3
998 ld.q SP, FRAME_R(4), r4
999 ld.q SP, FRAME_R(5), r5
1000 ld.q SP, FRAME_R(6), r6
1001 ld.q SP, FRAME_R(7), r7
1002 ld.q SP, FRAME_R(8), r8
1003 ld.q SP, FRAME_R(9), r9
1004 ld.q SP, FRAME_R(10), r10
1005 ld.q SP, FRAME_R(11), r11
1006 ld.q SP, FRAME_R(12), r12
1007 ld.q SP, FRAME_R(13), r13
1008 ld.q SP, FRAME_R(14), r14
1010 ld.q SP, FRAME_R(16), r16
1011 ld.q SP, FRAME_R(17), r17
1012 ld.q SP, FRAME_R(18), r18
1013 ld.q SP, FRAME_R(19), r19
1014 ld.q SP, FRAME_R(20), r20
1015 ld.q SP, FRAME_R(21), r21
1016 ld.q SP, FRAME_R(22), r22
1017 ld.q SP, FRAME_R(23), r23
1018 ld.q SP, FRAME_R(24), r24
1019 ld.q SP, FRAME_R(25), r25
1020 ld.q SP, FRAME_R(26), r26
1021 ld.q SP, FRAME_R(27), r27
1022 ld.q SP, FRAME_R(28), r28
1023 ld.q SP, FRAME_R(29), r29
1024 ld.q SP, FRAME_R(30), r30
1025 ld.q SP, FRAME_R(31), r31
1026 ld.q SP, FRAME_R(32), r32
1027 ld.q SP, FRAME_R(33), r33
1028 ld.q SP, FRAME_R(34), r34
1029 ld.q SP, FRAME_R(35), r35
1030 ld.q SP, FRAME_R(36), r36
1031 ld.q SP, FRAME_R(37), r37
1032 ld.q SP, FRAME_R(38), r38
1033 ld.q SP, FRAME_R(39), r39
1034 ld.q SP, FRAME_R(40), r40
1035 ld.q SP, FRAME_R(41), r41
1036 ld.q SP, FRAME_R(42), r42
1037 ld.q SP, FRAME_R(43), r43
1038 ld.q SP, FRAME_R(44), r44
1039 ld.q SP, FRAME_R(45), r45
1040 ld.q SP, FRAME_R(46), r46
1041 ld.q SP, FRAME_R(47), r47
1042 ld.q SP, FRAME_R(48), r48
1043 ld.q SP, FRAME_R(49), r49
1044 ld.q SP, FRAME_R(50), r50
1045 ld.q SP, FRAME_R(51), r51
1046 ld.q SP, FRAME_R(52), r52
1047 ld.q SP, FRAME_R(53), r53
1048 ld.q SP, FRAME_R(54), r54
1049 ld.q SP, FRAME_R(55), r55
1050 ld.q SP, FRAME_R(56), r56
1051 ld.q SP, FRAME_R(57), r57
1052 ld.q SP, FRAME_R(58), r58
1055 movi SR_BLOCK_EXC, r60
1057 putcon r59, SR /* SR.BL = 1, keep nesting out */
1058 ld.q SP, FRAME_S(FSSR), r61
1059 ld.q SP, FRAME_S(FSPC), r62
1060 movi SR_ASID_MASK, r60
1062 andc r61, r60, r61 /* Clear out older ASID */
1063 or r59, r61, r61 /* Retain current ASID */
1067 /* Ignore FSYSCALL_ID */
1069 ld.q SP, FRAME_R(59), r59
1070 ld.q SP, FRAME_R(60), r60
1071 ld.q SP, FRAME_R(61), r61
1072 ld.q SP, FRAME_R(62), r62
1075 ld.q SP, FRAME_R(15), SP
1080 * Third level handlers for VBR-based exceptions. Adapting args to
1081 * and/or deflecting to fourth level handlers.
1083 * Fourth level handlers interface.
1084 * Most are C-coded handlers directly pointed by the trap_jtable.
1085 * (Third = Fourth level)
1087 * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
1088 * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1089 * (r3) struct pt_regs *, original register's frame pointer
1090 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1091 * (r5) TRA control register (for syscall/debug benefit only)
1092 * (LINK) return address
1095 * Kernel TLB fault handlers will get a slightly different interface.
1096 * (r2) struct pt_regs *, original register's frame pointer
1097 * (r3) writeaccess, whether it's a store fault as opposed to load fault
1098 * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
1099 * (r5) Effective Address of fault
1100 * (LINK) return address
1103 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1108 or ZERO, ZERO, r3 /* Read */
1109 or ZERO, ZERO, r4 /* Data */
1111 pta call_do_page_fault, tr0
1116 movi 1, r3 /* Write */
1117 or ZERO, ZERO, r4 /* Data */
1119 pta call_do_page_fault, tr0
1124 beqi/u r4, EVENT_INTERRUPT, tr0
1126 or ZERO, ZERO, r3 /* Read */
1127 movi 1, r4 /* Text */
1132 movi do_page_fault, r6
1138 beqi/l r4, EVENT_INTERRUPT, tr0
1139 #ifdef CONFIG_SH_FPU
1140 movi do_fpu_state_restore, r6
1142 movi do_exception_error, r6
1149 beqi/l r4, EVENT_INTERRUPT, tr0
1150 #ifdef CONFIG_SH_FPU
1151 movi do_fpu_state_restore, r6
1153 movi do_exception_error, r6
1164 * system_call/unknown_trap third level handler:
1167 * (r2) fault/interrupt code, entry number (TRAP = 11)
1168 * (r3) struct pt_regs *, original register's frame pointer
1169 * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1170 * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1172 * (LINK) return address: ret_from_exception
1173 * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1176 * (*r3) Syscall reply (Saved r2)
1177 * (LINK) In case of syscall only it can be scrapped.
1178 * Common second level post handler will be ret_from_syscall.
1179 * Common (non-trace) exit point to that is syscall_ret (saving
1180 * result to r2). Common bad exit point is syscall_bad (returning
1181 * ENOSYS then saved to r2).
1186 /* Unknown Trap or User Trace */
1187 movi do_unknown_trapa, r6
1189 ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
1190 andi r2, 0x1ff, r2 /* r2 = syscall # */
1193 pta syscall_ret, tr0
1196 /* New syscall implementation*/
1198 pta unknown_trap, tr0
1199 or r5, ZERO, r4 /* TRA (=r5) -> r4 */
1201 bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
1203 /* It's a system call */
1204 st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
1205 andi r5, 0x1ff, r5 /* syscall # -> r5 */
1209 pta syscall_allowed, tr0
1210 movi NR_syscalls - 1, r4 /* Last valid */
1214 /* Return ENOSYS ! */
1215 movi -(ENOSYS), r2 /* Fall-through */
1219 st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
1221 #ifdef CONFIG_POOR_MANS_STRACE
1222 /* nothing useful in registers at this point */
1227 ld.q SP, FRAME_R(9), r2
1232 ld.q SP, FRAME_S(FSPC), r2
1233 addi r2, 4, r2 /* Move PC, being pre-execution event */
1234 st.q SP, FRAME_S(FSPC), r2
1235 pta ret_from_syscall, tr0
1239 /* A different return path for ret_from_fork, because we now need
1240 * to call schedule_tail with the later kernels. Because prev is
1241 * loaded into r2 by switch_to() means we can just call it straight away
1244 .global ret_from_fork
1247 movi schedule_tail,r5
1252 #ifdef CONFIG_POOR_MANS_STRACE
1253 /* nothing useful in registers at this point */
1258 ld.q SP, FRAME_R(9), r2
1263 ld.q SP, FRAME_S(FSPC), r2
1264 addi r2, 4, r2 /* Move PC, being pre-execution event */
1265 st.q SP, FRAME_S(FSPC), r2
1266 pta ret_from_syscall, tr0
1272 /* Use LINK to deflect the exit point, default is syscall_ret */
1273 pta syscall_ret, tr0
1275 pta syscall_notrace, tr0
1278 ld.l r2, TI_FLAGS, r4
1279 movi (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT), r6
1283 /* Trace it by calling syscall_trace before and after */
1284 movi syscall_trace, r4
1290 /* Reload syscall number as r5 is trashed by syscall_trace */
1291 ld.q SP, FRAME_S(FSYSCALL_ID), r5
1294 pta syscall_ret_trace, tr0
1298 /* Now point to the appropriate 4th level syscall handler */
1299 movi sys_call_table, r4
1304 /* Prepare original args */
1305 ld.q SP, FRAME_R(2), r2
1306 ld.q SP, FRAME_R(3), r3
1307 ld.q SP, FRAME_R(4), r4
1308 ld.q SP, FRAME_R(5), r5
1309 ld.q SP, FRAME_R(6), r6
1310 ld.q SP, FRAME_R(7), r7
1312 /* And now the trick for those syscalls requiring regs * ! */
1316 blink tr0, ZERO /* LINK is already properly set */
1319 /* We get back here only if under trace */
1320 st.q SP, FRAME_R(9), r2 /* Save return value */
1322 movi syscall_trace, LINK
1328 /* This needs to be done after any syscall tracing */
1329 ld.q SP, FRAME_S(FSPC), r2
1330 addi r2, 4, r2 /* Move PC, being pre-execution event */
1331 st.q SP, FRAME_S(FSPC), r2
1333 pta ret_from_syscall, tr0
1334 blink tr0, ZERO /* Resume normal return sequence */
1337 * --- Switch to running under a particular ASID and return the previous ASID value
1338 * --- The caller is assumed to have done a cli before calling this.
1340 * Input r2 : new ASID
1341 * Output r2 : old ASID
1344 .global switch_and_save_asid
/* switch_and_save_asid: install a new address-space ID into SR.ASID
 * (bits 16-23, cf. SR_ASID_MASK) and return the ASID it replaced.
 * In:  r2 = new ASID (only the low 8 bits are used)
 * Out: r2 = old ASID
 * The caller is assumed to have done a cli before calling this.
 * NOTE(review): the getcon/putcon of SR bracketing this arithmetic is on
 * lines not visible here -- r0 is presumed to hold the current SR. */
1345 switch_and_save_asid:
1348 shlli r4, 16, r4 /* r4 = mask to select ASID */
1349 and r0, r4, r3 /* r3 = shifted old ASID */
1350 andi r2, 255, r2 /* mask down new ASID */
1351 shlli r2, 16, r2 /* align new ASID against SR.ASID */
1352 andc r0, r4, r0 /* efface old ASID from SR */
1353 or r0, r2, r0 /* insert the new ASID */
1361 shlri r3, 16, r2 /* r2 = old ASID */
1364 .global route_to_panic_handler
1365 route_to_panic_handler:
1366 /* Switch to real mode, goto panic_handler, don't return. Useful for
1367 last-chance debugging, e.g. if no output wants to go to the console.
/* The handler must be entered via its physical address because the MMU
 * is being turned off on the way there. */
1370 movi panic_handler - CONFIG_PAGE_OFFSET, r1
1382 1: /* Now in real mode */
/* peek_real_address_q: read one quadword from a physical (real-mode)
 * address.  Runs the actual load with SR.BL set and the MMU off, then
 * returns to virtual mode via rte. */
1386 .global peek_real_address_q
1387 peek_real_address_q:
1389 r2 : real mode address to peek
1390 r2(out) : result quadword
1392 This is provided as a cheapskate way of manipulating device
1393 registers for debugging (to avoid the need to onchip_remap the debug
1394 module, and to avoid the need to onchip_remap the watchpoint
1395 controller in a way that identity maps sufficient bits to avoid the
1396 SH5-101 cut2 silicon defect).
1398 This code is not performance critical
1401 add.l r2, r63, r2 /* sign extend address */
1402 getcon sr, r0 /* r0 = saved original SR */
1405 or r0, r1, r1 /* r0 with block bit set */
1406 putcon r1, sr /* now in critical section */
1409 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1412 movi .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1413 movi 1f, r37 /* virtual mode return addr */
1420 .peek0: /* come here in real mode, don't touch caches!!
1421 still in critical section (sr.bl==1) */
1424 /* Here's the actual peek. If the address is bad, all bets are now off
1425 * what will happen (handlers invoked in real-mode = bad news) */
1428 rte /* Back to virtual mode */
/* poke_real_address_q: write one quadword to a physical (real-mode)
 * address.  Mirror image of peek_real_address_q: the store is executed
 * with SR.BL set and the MMU off, then rte restores virtual mode. */
1435 .global poke_real_address_q
1436 poke_real_address_q:
1438 r2 : real mode address to poke
1439 r3 : quadword value to write.
1441 This is provided as a cheapskate way of manipulating device
1442 registers for debugging (to avoid the need to onchip_remap the debug
1443 module, and to avoid the need to onchip_remap the watchpoint
1444 controller in a way that identity maps sufficient bits to avoid the
1445 SH5-101 cut2 silicon defect).
1447 This code is not performance critical
1450 add.l r2, r63, r2 /* sign extend address */
1451 getcon sr, r0 /* r0 = saved original SR */
1454 or r0, r1, r1 /* r0 with block bit set */
1455 putcon r1, sr /* now in critical section */
1458 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1461 movi .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1462 movi 1f, r37 /* virtual mode return addr */
1469 .poke0: /* come here in real mode, don't touch caches!!
1470 still in critical section (sr.bl==1) */
1473 /* Here's the actual poke. If the address is bad, all bets are now off
1474 * what will happen (handlers invoked in real-mode = bad news) */
1477 rte /* Back to virtual mode */
1485 * --- User Access Handling Section
1489 * User Access support. It all moved to non inlined Assembler
1490 * functions in here.
1492 * __kernel_size_t __copy_user(void *__to, const void *__from,
1493 * __kernel_size_t __n)
1496 * (r2) target address
1497 * (r3) source address
1498 * (r4) size in bytes
1502 * (r2) non-copied bytes
1504 * If a fault occurs on the user pointer, bail out early and return the
1505 * number of bytes not copied in r2.
1506 * Strategy : for large blocks, call a real memcpy function which can
1507 * move >1 byte at a time using unaligned ld/st instructions, and can
1508 * manipulate the cache using prefetch + alloco to improve the speed
1509 * further. If a fault occurs in that function, just revert to the
1510 * byte-by-byte approach used for small blocks; this is rare so the
1511 * performance hit for that case does not matter.
1513 * For small blocks it's not worth the overhead of setting up and calling
1514 * the memcpy routine; do the copy a byte at a time.
1519 pta __copy_user_byte_by_byte, tr1
1520 movi 16, r0 ! this value is a best guess, should tune it by benchmarking
1522 pta copy_user_memcpy, tr0
1524 /* Save arguments in case we have to fix-up unhandled page fault */
1528 st.q SP, 24, r35 ! r35 is callee-save
1529 /* Save LINK in a register to reduce RTS time later (otherwise
1530 ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
1534 /* Copy completed normally if we get back here */
1537 /* don't restore r2-r4, pointless */
1538 /* set result=r2 to zero as the copy must have succeeded. */
1541 blink tr0, r63 ! RTS
1543 .global __copy_user_fixup
1545 /* Restore stack frame */
1552 /* Fall through to original code, in the 'same' state we entered with */
1554 /* The slow byte-by-byte method is used if the fast copy traps due to a bad
1555 user address. In that rare case, the speed drop can be tolerated. */
/* Slow-path fallback for __copy_user: move one byte at a time, so a
 * faulting user access costs at most the current byte.  Per the
 * __copy_user header: r2 = target, r3 = source, r4 = size in bytes;
 * the residual count in r4 becomes the "bytes not copied" result. */
1556 __copy_user_byte_by_byte:
1557 pta ___copy_user_exit, tr1
1558 pta ___copy_user1, tr0
1559 beq/u r4, r63, tr1 /* early exit for zero length copy */
1564 ld.b r3, 0, r5 /* Fault address 1 */
1566 /* Could rewrite this to use just 1 add, but the second comes 'free'
1567 due to load latency */
1569 addi r4, -1, r4 /* No real fixup required */
1571 stx.b r3, r0, r5 /* Fault address 2 */
1580 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1583 * (r2) target address
1584 * (r3) size in bytes
1587 * (*r2) zero-ed target data
1588 * (r2) non-zero-ed bytes
1590 .global __clear_user
! tr1 = exit path, tr0 = loop head; r3 counts bytes still to clear.
1592 pta ___clear_user_exit, tr1
1593 pta ___clear_user1, tr0
! Store a zero byte to the user buffer; covered by the __ex_table entry
! (___clear_user1 -> ___clear_user_exit), so a fault exits with the
! remaining count still in r3.
1597 st.b r2, 0, ZERO /* Fault address */
1599 addi r3, -1, r3 /* No real fixup required */
1609 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1613 * (r2) target address
1614 * (r3) source address
1615 * (r4) maximum size in bytes
1619 * (r2) -EFAULT (in case of faulting)
1620 * copied data (otherwise)
1622 .global __strncpy_from_user
1623 __strncpy_from_user:
! tr0 = copy-loop head, tr1 = done path.
1624 pta ___strncpy_from_user1, tr0
1625 pta ___strncpy_from_user_done, tr1
1626 or r4, ZERO, r5 /* r5 = original count */
! r63 reads as zero on SHmedia: bail out immediately for a zero-length copy.
1627 beq/u r4, r63, tr1 /* early exit if r4==0 */
1628 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1629 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1631 ___strncpy_from_user1:
! User-space byte load; the only faultable instruction, covered by the
! __ex_table entry (___strncpy_from_user1 -> ___strncpy_from_user_exit).
1632 ld.b r3, 0, r7 /* Fault address: only in reading */
1637 addi r4, -1, r4 /* return real number of copied bytes */
1640 ___strncpy_from_user_done:
! copied = original count (r5) - remaining (r4).
1641 sub r5, r4, r6 /* If done, return copied */
1643 ___strncpy_from_user_exit:
1649 * extern long __strnlen_user(const char *__s, long __n)
1652 * (r2) source address
1653 * (r3) source size in bytes
1656 * (r2) -EFAULT (in case of faulting)
1657 * string length (otherwise)
1659 .global __strnlen_user
! tr0 = done path (copies counter into reply), tr1 = scan-loop head.
1661 pta ___strnlen_user_set_reply, tr0
1662 pta ___strnlen_user1, tr1
1663 or ZERO, ZERO, r5 /* r5 = counter */
1664 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1665 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
! Indexed user-space load (r2 + r5); covered by the __ex_table entry
! (___strnlen_user1 -> ___strnlen_user_exit), so a fault returns -EFAULT.
1669 ldx.b r2, r5, r7 /* Fault address: only in reading */
1670 addi r3, -1, r3 /* No real fixup */
1674 ! The line below used to be active. This led to a junk byte lying between each pair
1675 ! of entries in the argv & envp structures in memory. Whilst the program saw the right data
1676 ! via the argv and envp arguments to main, it meant the 'flat' representation visible through
1677 ! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
1678 ! addi r5, 1, r5 /* Include '\0' */
1680 ___strnlen_user_set_reply:
1681 or r5, ZERO, r6 /* If done, return counter */
1683 ___strnlen_user_exit:
1689 * extern long __get_user_asm_?(void *val, long addr)
1693 * (r3) source address (in User Space)
1696 * (r2) -EFAULT (faulting)
1699 .global __get_user_asm_b
! Preload the error reply; if the load below faults, the __ex_table entry
! (___get_user_asm_b1 -> ___get_user_asm_b_exit) skips the success path
! and r2 is still -EFAULT.
1702 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
! Byte load from user space; the only faultable instruction here.
1705 ld.b r3, 0, r5 /* r5 = data */
1709 ___get_user_asm_b_exit:
! 16-bit variant of __get_user_asm_b above: preload -EFAULT, then do the
! single faultable user-space load covered by __ex_table.
1714 .global __get_user_asm_w
1717 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1720 ld.w r3, 0, r5 /* r5 = data */
1724 ___get_user_asm_w_exit:
! 32-bit variant of __get_user_asm_b: preload -EFAULT, then do the single
! faultable user-space load covered by __ex_table.
1729 .global __get_user_asm_l
1732 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1735 ld.l r3, 0, r5 /* r5 = data */
1739 ___get_user_asm_l_exit:
! 64-bit variant of __get_user_asm_b: preload -EFAULT, then do the single
! faultable user-space load covered by __ex_table.
1744 .global __get_user_asm_q
1747 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1750 ld.q r3, 0, r5 /* r5 = data */
1754 ___get_user_asm_q_exit:
1759 * extern long __put_user_asm_?(void *pval, long addr)
1762 * (r2) kernel pointer to value
1763 * (r3) dest address (in User Space)
1766 * (r2) -EFAULT (faulting)
1769 .global __put_user_asm_b
! Fetch the value from the kernel pointer first (cannot fault), then
! preload -EFAULT before the faultable user-space store (elided here);
! the __ex_table entry (___put_user_asm_b1 -> ___put_user_asm_b_exit)
! leaves -EFAULT in r2 on a fault.
1771 ld.b r2, 0, r4 /* r4 = data */
1772 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1778 ___put_user_asm_b_exit:
! 16-bit variant of __put_user_asm_b above: kernel-side load of the value,
! then preload -EFAULT ahead of the faultable user store.
1783 .global __put_user_asm_w
1785 ld.w r2, 0, r4 /* r4 = data */
1786 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1792 ___put_user_asm_w_exit:
! 32-bit variant of __put_user_asm_b: kernel-side load of the value,
! then preload -EFAULT ahead of the faultable user store.
1797 .global __put_user_asm_l
1799 ld.l r2, 0, r4 /* r4 = data */
1800 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1806 ___put_user_asm_l_exit:
! 64-bit variant of __put_user_asm_b: kernel-side load of the value,
! then preload -EFAULT ahead of the faultable user store.
1811 .global __put_user_asm_q
1813 ld.q r2, 0, r4 /* r4 = data */
1814 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1820 ___put_user_asm_q_exit:
1825 /* The idea is : when we get an unhandled panic, we dump the registers
1826 to a known memory location, then just sit in a tight loop.
1827 This allows the human to look at the memory region through the GDB
1828 session (assuming the debug module's SHwy initiator isn't locked up
1829 or anything), to hopefully analyze the cause of the panic. */
1831 /* On entry, former r15 (SP) is in DCR
1832 former r0 is at resvec_saved_area + 0
1833 former r1 is at resvec_saved_area + 8
1834 former tr0 is at resvec_saved_area + 32
1835 DCR is the only register whose value is lost altogether.
! r0 = physical base of the fixed dump area; subsequent st.q's write each
! saved register at offset 8*regno. (Most of the store sequence is elided
! in this excerpt.)
1838 movi 0xffffffff80000000, r0 ! phy of dump area
1839 ld.q SP, 0x000, r1 ! former r0
1841 ld.q SP, 0x008, r1 ! former r1
! r63 always reads as zero, so this slot is stored as 0 by definition.
1905 st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
1907 ld.q SP, 0x020, r1 ! former tr0
1957 /* Prepare to jump to C - physical address */
! MMU is (presumably) off here, hence the virt->phys adjustment by
! subtracting CONFIG_PAGE_OFFSET — confirm against the elided context.
1958 movi panic_handler-CONFIG_PAGE_OFFSET, r1
1972 * --- Signal Handling Section
1976 * extern long long _sa_default_rt_restorer
1977 * extern long long _sa_default_restorer
1981 * extern void _sa_default_rt_restorer(void)
1982 * extern void _sa_default_restorer(void)
1984 * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
1985 * from user space. Copied into user space by signal management.
1986 * Both must be quad aligned and 2 quad long (4 instructions).
1990 .global sa_default_rt_restorer
1991 sa_default_rt_restorer:
! shori shifts r9 left 16 and ORs in the low 16 bits of the syscall
! number; the preceding movi and the trap instruction are elided here.
1993 shori __NR_rt_sigreturn, r9
1998 .global sa_default_restorer
1999 sa_default_restorer:
! Same shape as above, but issuing sys_sigreturn instead.
2001 shori __NR_sigreturn, r9
2006 * --- __ex_table Section
2010 * User Access Exception Table.
! Each entry pairs the address of a faultable user-access instruction
! with the fixup label to resume at if that instruction takes an
! unhandled page fault; the fault handler searches this table.
2012 .section __ex_table, "a"
2014 .global asm_uaccess_start /* Just a marker */
2017 .long ___copy_user1, ___copy_user_exit
2018 .long ___copy_user2, ___copy_user_exit
2019 .long ___clear_user1, ___clear_user_exit
2020 .long ___strncpy_from_user1, ___strncpy_from_user_exit
2021 .long ___strnlen_user1, ___strnlen_user_exit
2022 .long ___get_user_asm_b1, ___get_user_asm_b_exit
2023 .long ___get_user_asm_w1, ___get_user_asm_w_exit
2024 .long ___get_user_asm_l1, ___get_user_asm_l_exit
2025 .long ___get_user_asm_q1, ___get_user_asm_q_exit
2026 .long ___put_user_asm_b1, ___put_user_asm_b_exit
2027 .long ___put_user_asm_w1, ___put_user_asm_w_exit
2028 .long ___put_user_asm_l1, ___put_user_asm_l_exit
2029 .long ___put_user_asm_q1, ___put_user_asm_q_exit
2031 .global asm_uaccess_end /* Just a marker */
2038 * --- .text.init Section
2041 .section .text.init, "ax"
2044 * void trap_init (void)
2049 addi SP, -24, SP /* Room to save r28/r29/r30 */
2054 /* Set VBR and RESVEC */
! r19 = virtual address of the VBR handler block, with the low control
! bits (MMUOFF + reserved) cleared before loading it into VBR.
2055 movi LVBR_block, r19
2056 andi r19, -4, r19 /* reset MMUOFF + reserved */
2057 /* For RESVEC exceptions we force the MMU off, which means we need the
2058 physical address. */
2059 movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
2060 andi r20, -4, r20 /* reset reserved */
2061 ori r20, 1, r20 /* set MMUOFF */
! Sanity check: compare the assembled size of the VBR block against the
! expected BLOCK_SIZE (actual-size computation partly elided here).
2066 movi LVBR_block_end, r21
2068 movi BLOCK_SIZE, r29 /* r29 = expected size */
2073 * Ugly, but better loop forever now than crash afterwards.
2074 * We should print a message, but if we touch LVBR or
2075 * LRESVEC blocks we should not be surprised if we get stuck
! r28 holds the address of trap_init_loop so the failure path can spin
! there forever instead of running past a corrupt vector block.
2078 pta trap_init_loop, tr1
2079 gettr tr1, r28 /* r28 = trap_init_loop */
2080 sub r21, r30, r30 /* r30 = actual size */
2083 * VBR/RESVEC handlers overlap by being bigger than
2084 * allowed. Very bad. Just loop forever.
2085 * (r28) panic/loop address
2086 * (r29) expected size
2092 /* Now that exception vectors are set up reset SR.BL */
2094 movi SR_UNBLOCK_EXC, r23