/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * OpenRISC head.S
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/serial_reg.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/spr_defs.h>
#include <asm/asm-offsets.h>
#include <linux/of_fdt.h>
#define tophys(rd,rs)				\
	l.movhi	rd,hi(-KERNELBASE)		;\
	l.add	rd,rd,rs

#define CLEAR_GPR(gpr)				\
	l.movhi	gpr,0x0
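/*
 * In C terms (an illustrative sketch, not kernel code): tophys()
 * turns a kernel virtual address into a physical one, i.e.
 *
 *	pa = va + (u32)(-KERNELBASE);	// == va - KERNELBASE
 *
 * which works because lo(-KERNELBASE) is zero, so a single l.movhi
 * plus l.add suffices.
 */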
#define LOAD_SYMBOL_2_GPR(gpr,symbol)		\
	l.movhi	gpr,hi(symbol)			;\
	l.ori	gpr,gpr,lo(symbol)
#define UART_BASE_ADD	0x90000000

#define EXCEPTION_SR	(SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)
#define SYSCALL_SR	(SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)

/* ============================================[ tmp store locations ]=== */

#define SPR_SHADOW_GPR(x)	((x) + SPR_GPR_BASE + 32)
/*
 * emergency_print temporary stores
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EMERGENCY_PRINT_STORE_GPR4	l.mtspr r0,r4,SPR_SHADOW_GPR(14)
#define EMERGENCY_PRINT_LOAD_GPR4	l.mfspr r4,r0,SPR_SHADOW_GPR(14)

#define EMERGENCY_PRINT_STORE_GPR5	l.mtspr r0,r5,SPR_SHADOW_GPR(15)
#define EMERGENCY_PRINT_LOAD_GPR5	l.mfspr r5,r0,SPR_SHADOW_GPR(15)

#define EMERGENCY_PRINT_STORE_GPR6	l.mtspr r0,r6,SPR_SHADOW_GPR(16)
#define EMERGENCY_PRINT_LOAD_GPR6	l.mfspr r6,r0,SPR_SHADOW_GPR(16)

#define EMERGENCY_PRINT_STORE_GPR7	l.mtspr r0,r7,SPR_SHADOW_GPR(7)
#define EMERGENCY_PRINT_LOAD_GPR7	l.mfspr r7,r0,SPR_SHADOW_GPR(7)

#define EMERGENCY_PRINT_STORE_GPR8	l.mtspr r0,r8,SPR_SHADOW_GPR(8)
#define EMERGENCY_PRINT_LOAD_GPR8	l.mfspr r8,r0,SPR_SHADOW_GPR(8)

#define EMERGENCY_PRINT_STORE_GPR9	l.mtspr r0,r9,SPR_SHADOW_GPR(9)
#define EMERGENCY_PRINT_LOAD_GPR9	l.mfspr r9,r0,SPR_SHADOW_GPR(9)

#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EMERGENCY_PRINT_STORE_GPR4	l.sw    0x20(r0),r4
#define EMERGENCY_PRINT_LOAD_GPR4	l.lwz   r4,0x20(r0)

#define EMERGENCY_PRINT_STORE_GPR5	l.sw    0x24(r0),r5
#define EMERGENCY_PRINT_LOAD_GPR5	l.lwz   r5,0x24(r0)

#define EMERGENCY_PRINT_STORE_GPR6	l.sw    0x28(r0),r6
#define EMERGENCY_PRINT_LOAD_GPR6	l.lwz   r6,0x28(r0)

#define EMERGENCY_PRINT_STORE_GPR7	l.sw    0x2c(r0),r7
#define EMERGENCY_PRINT_LOAD_GPR7	l.lwz   r7,0x2c(r0)

#define EMERGENCY_PRINT_STORE_GPR8	l.sw    0x30(r0),r8
#define EMERGENCY_PRINT_LOAD_GPR8	l.lwz   r8,0x30(r0)

#define EMERGENCY_PRINT_STORE_GPR9	l.sw    0x34(r0),r9
#define EMERGENCY_PRINT_LOAD_GPR9	l.lwz   r9,0x34(r0)

#endif
/*
 * TLB miss handlers temporary stores
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EXCEPTION_STORE_GPR2	l.mtspr r0,r2,SPR_SHADOW_GPR(2)
#define EXCEPTION_LOAD_GPR2	l.mfspr r2,r0,SPR_SHADOW_GPR(2)

#define EXCEPTION_STORE_GPR3	l.mtspr r0,r3,SPR_SHADOW_GPR(3)
#define EXCEPTION_LOAD_GPR3	l.mfspr r3,r0,SPR_SHADOW_GPR(3)

#define EXCEPTION_STORE_GPR4	l.mtspr r0,r4,SPR_SHADOW_GPR(4)
#define EXCEPTION_LOAD_GPR4	l.mfspr r4,r0,SPR_SHADOW_GPR(4)

#define EXCEPTION_STORE_GPR5	l.mtspr r0,r5,SPR_SHADOW_GPR(5)
#define EXCEPTION_LOAD_GPR5	l.mfspr r5,r0,SPR_SHADOW_GPR(5)

#define EXCEPTION_STORE_GPR6	l.mtspr r0,r6,SPR_SHADOW_GPR(6)
#define EXCEPTION_LOAD_GPR6	l.mfspr r6,r0,SPR_SHADOW_GPR(6)

#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EXCEPTION_STORE_GPR2	l.sw    0x64(r0),r2
#define EXCEPTION_LOAD_GPR2	l.lwz   r2,0x64(r0)

#define EXCEPTION_STORE_GPR3	l.sw    0x68(r0),r3
#define EXCEPTION_LOAD_GPR3	l.lwz   r3,0x68(r0)

#define EXCEPTION_STORE_GPR4	l.sw    0x6c(r0),r4
#define EXCEPTION_LOAD_GPR4	l.lwz   r4,0x6c(r0)

#define EXCEPTION_STORE_GPR5	l.sw    0x70(r0),r5
#define EXCEPTION_LOAD_GPR5	l.lwz   r5,0x70(r0)

#define EXCEPTION_STORE_GPR6	l.sw    0x74(r0),r6
#define EXCEPTION_LOAD_GPR6	l.lwz   r6,0x74(r0)

#endif
/*
 * EXCEPTION_HANDLE temporary stores
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EXCEPTION_T_STORE_GPR30		l.mtspr r0,r30,SPR_SHADOW_GPR(30)
#define EXCEPTION_T_LOAD_GPR30(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(30)

#define EXCEPTION_T_STORE_GPR10		l.mtspr r0,r10,SPR_SHADOW_GPR(10)
#define EXCEPTION_T_LOAD_GPR10(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(10)

#define EXCEPTION_T_STORE_SP		l.mtspr r0,r1,SPR_SHADOW_GPR(1)
#define EXCEPTION_T_LOAD_SP(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(1)

#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EXCEPTION_T_STORE_GPR30		l.sw    0x78(r0),r30
#define EXCEPTION_T_LOAD_GPR30(reg)	l.lwz   reg,0x78(r0)

#define EXCEPTION_T_STORE_GPR10		l.sw    0x7c(r0),r10
#define EXCEPTION_T_LOAD_GPR10(reg)	l.lwz   reg,0x7c(r0)

#define EXCEPTION_T_STORE_SP		l.sw    0x80(r0),r1
#define EXCEPTION_T_LOAD_SP(reg)	l.lwz   reg,0x80(r0)

#endif
/* =========================================================[ macros ]=== */
#ifdef CONFIG_SMP
#define GET_CURRENT_PGD(reg,t1)				\
	LOAD_SYMBOL_2_GPR(reg,current_pgd)		;\
	l.mfspr	t1,r0,SPR_COREID			;\
	l.slli	t1,t1,2					;\
	l.add	reg,reg,t1				;\
	tophys	(t1,reg)				;\
	l.lwz	reg,0(t1)
#else
#define GET_CURRENT_PGD(reg,t1)				\
	LOAD_SYMBOL_2_GPR(reg,current_pgd)		;\
	tophys	(t1,reg)				;\
	l.lwz	reg,0(t1)
#endif
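/*
 * In C terms (a sketch; current_pgd is a per-CPU array, indexed by
 * core id on SMP, entry 0 on UP):
 *
 *	reg = current_pgd[coreid];
 */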
/* Load r10 from current_thread_info_set - clobbers r1 and r30 */
#ifdef CONFIG_SMP
#define GET_CURRENT_THREAD_INFO				\
	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)	;\
	tophys	(r30,r1)				;\
	l.mfspr	r10,r0,SPR_COREID			;\
	l.slli	r10,r10,2				;\
	l.add	r30,r30,r10				;\
	/* r10: current_thread_info */			;\
	l.lwz	r10,0(r30)
#else
#define GET_CURRENT_THREAD_INFO				\
	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)	;\
	tophys	(r30,r1)				;\
	/* r10: current_thread_info */			;\
	l.lwz	r10,0(r30)
#endif
/*
 * DSCR: this is a common hook for handling exceptions. it will save
 *       the needed registers, set up stack and pointer to current
 *       then jump to the handler while enabling MMU
 *
 * PRMS: handler	- a function to jump to. it has to save the
 *			remaining registers to kernel stack, call
 *			appropriate arch-independent exception handler
 *			and finally jump to ret_from_except
 *
 * PREQ: unchanged state from the time exception happened
 *
 * POST: SAVED the following registers original value
 *	 to the new created exception frame pointed to by r1
 *
 *	 r1  - ksp	pointing to the new (exception) frame
 *	 r4  - EEAR	exception EA
 *	 r10 - current	pointing to current_thread_info struct
 *	 r12 - syscall	0, since we didn't come from syscall
 *	 r30 - handler	address of the handler we'll jump to
 *
 *	 handler has to save remaining registers to the exception
 *	 ksp frame *before* tainting them!
 *
 * NOTE: this function is not reentrant per se. reentrancy is guaranteed
 *       by the processor disabling all exceptions/interrupts when an
 *       exception occurs.
 *
 * OPTM: no need to make it so wasteful to extract ksp when in user mode
 */
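/*
 * In C-like terms, EXCEPTION_HANDLE() amounts to (a sketch using
 * pt_regs-style names; not literal kernel code):
 *
 *	sp  = user_mode(ESR) ? current_thread_info()->ksp : sp;
 *	sp -= INT_FRAME_SIZE;
 *	regs->pc = EPCR;  regs->sr = ESR;  regs->sp = old_sp;
 *	regs->gpr12 = 0;			// not a syscall
 *	r4 = EEAR;  ESR = EXCEPTION_SR;  EPCR = handler;  rfe();
 */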
#define EXCEPTION_HANDLE(handler)				\
	EXCEPTION_T_STORE_GPR30					;\
	l.mfspr r30,r0,SPR_ESR_BASE				;\
	l.andi  r30,r30,SPR_SR_SM				;\
	l.sfeqi r30,0						;\
	EXCEPTION_T_STORE_GPR10					;\
	l.bnf   2f                       /* kernel_mode */	;\
	 EXCEPTION_T_STORE_SP              /* delay slot */	;\
1: /* user_mode:   */						;\
	GET_CURRENT_THREAD_INFO					;\
	tophys  (r30,r10)					;\
	l.lwz   r1,(TI_KSP)(r30)				;\
	/* fall through */					;\
2: /* kernel_mode: */						;\
	/* create new stack frame, save only needed gprs */	;\
	/* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */	;\
	/* r12: temp, syscall indicator */			;\
	l.addi  r1,r1,-(INT_FRAME_SIZE)				;\
	/* r1 is KSP, r30 is __pa(KSP) */			;\
	tophys  (r30,r1)					;\
	l.sw    PT_GPR12(r30),r12				;\
	/* r4 use for tmp before EA */				;\
	l.mfspr r12,r0,SPR_EPCR_BASE				;\
	l.sw    PT_PC(r30),r12					;\
	l.mfspr r12,r0,SPR_ESR_BASE				;\
	l.sw    PT_SR(r30),r12					;\
	/* save r30 */						;\
	EXCEPTION_T_LOAD_GPR30(r12)				;\
	l.sw	PT_GPR30(r30),r12				;\
	/* save r10 as was prior to exception */		;\
	EXCEPTION_T_LOAD_GPR10(r12)				;\
	l.sw	PT_GPR10(r30),r12				;\
	/* save PT_SP as was prior to exception */		;\
	EXCEPTION_T_LOAD_SP(r12)				;\
	l.sw	PT_SP(r30),r12					;\
	/* save exception r4, set r4 = EA */			;\
	l.sw	PT_GPR4(r30),r4					;\
	l.mfspr r4,r0,SPR_EEAR_BASE				;\
	/* r12 == 1 if we come from syscall */			;\
	CLEAR_GPR(r12)						;\
	/* ----- turn on MMU ----- */				;\
	/* Carry DSX into exception SR */			;\
	l.mfspr r30,r0,SPR_SR					;\
	l.andi	r30,r30,SPR_SR_DSX				;\
	l.ori	r30,r30,(EXCEPTION_SR)				;\
	l.mtspr	r0,r30,SPR_ESR_BASE				;\
	/* r30: EA address of handler */			;\
	LOAD_SYMBOL_2_GPR(r30,handler)				;\
	l.mtspr r0,r30,SPR_EPCR_BASE				;\
	l.rfe
/*
 * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
 * #define UNHANDLED_EXCEPTION(handler)			\
 *	l.ori	r3,r0,0x1				;\
 *	l.mtspr	r0,r3,SPR_SR				;\
 *	l.movhi	r3,hi(0xf0000100)			;\
 *	l.ori	r3,r3,lo(0xf0000100)			;\
 *	l.jr	r3					;\
 *	l.nop	1
 * #endif
 */
/* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
 *       a bit more careful (if we have a PT_SP or current pointer
 *       corruption) and set them up from 'current_set'
 */
#define UNHANDLED_EXCEPTION(handler)				\
	EXCEPTION_T_STORE_GPR30					;\
	EXCEPTION_T_STORE_GPR10					;\
	EXCEPTION_T_STORE_SP					;\
	/* temporary store r3, r9 into r1, r10 */		;\
	l.addi	r1,r3,0x0					;\
	l.addi	r10,r9,0x0					;\
	/* the string referenced by r3 must be low enough */	;\
	l.jal	_emergency_print				;\
	 l.ori	r3,r0,lo(_string_unhandled_exception)		;\
	l.mfspr	r3,r0,SPR_NPC					;\
	l.jal	_emergency_print_nr				;\
	 l.andi	r3,r3,0x1f00					;\
	/* the string referenced by r3 must be low enough */	;\
	l.jal	_emergency_print				;\
	 l.ori	r3,r0,lo(_string_epc_prefix)			;\
	l.jal	_emergency_print_nr				;\
	 l.mfspr r3,r0,SPR_EPCR_BASE				;\
	l.jal	_emergency_print				;\
	 l.ori	r3,r0,lo(_string_nl)				;\
	/* end of printing */					;\
	l.addi	r3,r1,0x0					;\
	l.addi	r9,r10,0x0					;\
	/* extract current, ksp from current_set */		;\
	LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top)		;\
	LOAD_SYMBOL_2_GPR(r10,init_thread_union)		;\
	/* create new stack frame, save only needed gprs */	;\
	/* r1: KSP, r10: current, r31: __pa(KSP) */		;\
	/* r12: temp, syscall indicator, r13 temp */		;\
	l.addi  r1,r1,-(INT_FRAME_SIZE)				;\
	/* r1 is KSP, r30 is __pa(KSP) */			;\
	tophys  (r30,r1)					;\
	l.sw    PT_GPR12(r30),r12				;\
	l.mfspr r12,r0,SPR_EPCR_BASE				;\
	l.sw    PT_PC(r30),r12					;\
	l.mfspr r12,r0,SPR_ESR_BASE				;\
	l.sw    PT_SR(r30),r12					;\
	/* save r30 */						;\
	EXCEPTION_T_LOAD_GPR30(r12)				;\
	l.sw	PT_GPR30(r30),r12				;\
	/* save r10 as was prior to exception */		;\
	EXCEPTION_T_LOAD_GPR10(r12)				;\
	l.sw	PT_GPR10(r30),r12				;\
	/* save PT_SP as was prior to exception */		;\
	EXCEPTION_T_LOAD_SP(r12)				;\
	l.sw	PT_SP(r30),r12					;\
	l.sw	PT_GPR13(r30),r13				;\
	/* save exception r4, set r4 = EA */			;\
	l.sw	PT_GPR4(r30),r4					;\
	l.mfspr r4,r0,SPR_EEAR_BASE				;\
	/* r12 == 1 if we come from syscall */			;\
	CLEAR_GPR(r12)						;\
	/* ----- play a MMU trick ----- */			;\
	l.ori	r30,r0,(EXCEPTION_SR)				;\
	l.mtspr	r0,r30,SPR_ESR_BASE				;\
	/* r31: EA address of handler */			;\
	LOAD_SYMBOL_2_GPR(r30,handler)				;\
	l.mtspr r0,r30,SPR_EPCR_BASE				;\
	l.rfe
/* =====================================================[ exceptions ]=== */

/* ---[ 0x100: RESET exception ]----------------------------------------- */
	/* Jump to .init code at _start which lives in the .head section
	 * and will be discarded after boot.
	 */
	LOAD_SYMBOL_2_GPR(r15, _start)
	tophys	(r13,r15)			/* MMU disabled */
	l.jr	r13
	 l.nop
/* ---[ 0x200: BUS exception ]------------------------------------------- */
_dispatch_bus_fault:
	EXCEPTION_HANDLE(_bus_fault_handler)
/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
_dispatch_do_dpage_fault:
//	totally disable timer interrupt
//	l.mtspr	r0,r0,SPR_TTMR
//	DEBUG_TLB_PROBE(0x300)
//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
	EXCEPTION_HANDLE(_data_page_fault_handler)
/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
_dispatch_do_ipage_fault:
//	totally disable timer interrupt
//	l.mtspr	r0,r0,SPR_TTMR
//	DEBUG_TLB_PROBE(0x400)
//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
	EXCEPTION_HANDLE(_insn_page_fault_handler)
/* ---[ 0x500: Timer exception ]----------------------------------------- */
	EXCEPTION_HANDLE(_timer_handler)

/* ---[ 0x600: Alignment exception ]------------------------------------- */
	EXCEPTION_HANDLE(_alignment_handler)

/* ---[ 0x700: Illegal insn exception ]---------------------------------- */
	EXCEPTION_HANDLE(_illegal_instruction_handler)

/* ---[ 0x800: External interrupt exception ]---------------------------- */
	EXCEPTION_HANDLE(_external_irq_handler)

/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
	l.j	boot_dtlb_miss_handler
	 l.nop

/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
	l.j	boot_itlb_miss_handler
	 l.nop
/* ---[ 0xb00: Range exception ]----------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0xb00)

/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
	EXCEPTION_HANDLE(_sys_call_handler)

/* ---[ 0xd00: Floating point exception ]-------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0xd00)

/* ---[ 0xe00: Trap exception ]------------------------------------------ */
//	UNHANDLED_EXCEPTION(_vector_0xe00)
	EXCEPTION_HANDLE(_trap_handler)

/* ---[ 0xf00: Reserved exception ]-------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0xf00)
/* ---[ 0x1000: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1000)

/* ---[ 0x1100: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1100)

/* ---[ 0x1200: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1200)

/* ---[ 0x1300: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1300)

/* ---[ 0x1400: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1400)

/* ---[ 0x1500: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1500)

/* ---[ 0x1600: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1600)

/* ---[ 0x1700: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1700)

/* ---[ 0x1800: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1800)

/* ---[ 0x1900: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1900)

/* ---[ 0x1a00: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1a00)

/* ---[ 0x1b00: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1b00)

/* ---[ 0x1c00: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1c00)

/* ---[ 0x1d00: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1d00)

/* ---[ 0x1e00: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1e00)

/* ---[ 0x1f00: Reserved exception ]------------------------------------- */
	UNHANDLED_EXCEPTION(_vector_0x1f00)
/* ===================================================[ kernel start ]=== */

/* This early stuff belongs in HEAD, but some of the functions below definitely
 * don't...
 */

	/* Init r0 to zero as per spec */
	CLEAR_GPR(r0)

	/* save kernel parameters */
	l.or	r25,r0,r3	/* pointer to fdt */
	/*
	 * ensure a deterministic start
	 */

	l.mfspr	r26,r0,SPR_COREID
	/*
	 * set up initial ksp and current
	 */
	/* setup kernel stack */
	LOAD_SYMBOL_2_GPR(r1,init_thread_union + THREAD_SIZE)
	LOAD_SYMBOL_2_GPR(r10,init_thread_union)	// setup current
	/*
	 * .data contains initialized data,
	 * .bss contains uninitialized data - clear it up
	 */
	LOAD_SYMBOL_2_GPR(r24, __bss_start)
	LOAD_SYMBOL_2_GPR(r26, _end)
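	/*
	 * The clearing loop amounts to, in C terms (a sketch):
	 *
	 *	for (u32 *p = __bss_start; p < (u32 *)_end; p++)
	 *		*p = 0;
	 */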
	/* The MMU needs to be enabled before or32_early_setup is called */

	/*
	 * enable dmmu & immu
	 * i.e. set the 6th and 7th bits of SR (SR[5] = DME, SR[6] = IME)
	 */
	l.mfspr	r30,r0,SPR_SR
	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
	l.or	r30,r30,r28
	l.mtspr	r0,r30,SPR_SR
	// reset the simulation counters
	/* check fdt header magic word */
	l.lwz	r3,0(r25)	/* load magic from fdt into r3 */
	l.movhi	r4,hi(OF_DT_HEADER)
	l.ori	r4,r4,lo(OF_DT_HEADER)
	l.sfeq	r3,r4
	l.bf	_fdt_found
	 l.nop
	/* magic number mismatch, set fdt pointer to null */
	l.or	r25,r0,r0
_fdt_found:
	/* pass fdt pointer to or32_early_setup in r3 */
	l.or	r3,r0,r25
	LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
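	/*
	 * In C terms (a sketch; OF_DT_HEADER is the big-endian FDT magic):
	 *
	 *	if (*(u32 *)fdt != OF_DT_HEADER)
	 *		fdt = NULL;
	 *	or32_early_setup(fdt);
	 */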
	/*
	 * clear all GPRS to increase determinism
	 */

	/*
	 * jump to kernel entry (start_kernel)
	 */
	LOAD_SYMBOL_2_GPR(r30, start_kernel)
/*
 * I N V A L I D A T E   T L B   e n t r i e s
 */
	LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
	LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
	l.addi	r7,r0,128	/* Maximum number of sets */
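	/*
	 * The loop zeroes every DTLB and ITLB match register, roughly
	 * (a sketch; mtspr() stands in for l.mtspr):
	 *
	 *	for (set = 0; set < 128; set++) {
	 *		mtspr(SPR_DTLBMR_BASE(0) + set, 0);
	 *		mtspr(SPR_ITLBMR_BASE(0) + set, 0);
	 *	}
	 */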
	/* Doze the cpu until we are asked to run */
	/* If we don't have power management skip doze */
	l.mfspr	r25,r0,SPR_UPR
	l.andi	r25,r25,SPR_UPR_PMP
	l.sfeq	r25,r0
	l.bf	secondary_check_release
	 l.nop

	/* Setup special secondary exception handler */
	LOAD_SYMBOL_2_GPR(r3, _secondary_evbar)
	tophys(r25,r3)
	l.mtspr	r0,r25,SPR_EVBAR

	/* Enable Interrupts */
	l.mfspr	r25,r0,SPR_SR
	l.ori	r25,r25,SPR_SR_IEE
	l.mtspr	r0,r25,SPR_SR

	/* Unmask interrupts */
	l.mfspr	r25,r0,SPR_PICMR
	l.ori	r25,r25,0xffff
	l.mtspr	r0,r25,SPR_PICMR

	/* Doze */
	l.mfspr	r25,r0,SPR_PMR
	LOAD_SYMBOL_2_GPR(r3, SPR_PMR_DME)
	l.or	r25,r25,r3
	l.mtspr	r0,r25,SPR_PMR

	/* Wakeup - Restore exception handler */
	l.mtspr	r0,r0,SPR_EVBAR
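	/*
	 * The doze sequence in C terms (a sketch):
	 *
	 *	if (mfspr(SPR_UPR) & SPR_UPR_PMP) {	// PM unit present?
	 *		SR  |= SPR_SR_IEE;		// take the wake-up IRQ
	 *		PMR |= SPR_PMR_DME;		// doze until interrupt
	 *	}
	 */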
secondary_check_release:
	/*
	 * Check if we actually got the release signal, if not go back to
	 * sleep.
	 */
	l.mfspr	r25,r0,SPR_COREID
	LOAD_SYMBOL_2_GPR(r3, secondary_release)

	/* fall through to secondary_init */
	/*
	 * set up initial ksp and current
	 */
	LOAD_SYMBOL_2_GPR(r10, secondary_thread_info)
	tophys	(r30,r10)
	l.lwz	r10,0(r30)
	l.addi	r1,r10,THREAD_SIZE
	l.mfspr	r30,r0,SPR_SR
	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
	l.or	r30,r30,r28
	/*
	 * This is a bit tricky, we need to switch over from physical addresses
	 * to virtual addresses on the fly.
	 * To do that, we first set up ESR with the IME and DME bits set.
	 * Then EPCR is set to secondary_start and then a l.rfe is issued to
	 * "jump" to that.
	 */
	l.mtspr	r0,r30,SPR_ESR_BASE
	LOAD_SYMBOL_2_GPR(r30, secondary_start)
	l.mtspr	r0,r30,SPR_EPCR_BASE
	l.rfe

secondary_start:
	LOAD_SYMBOL_2_GPR(r30, secondary_start_kernel)
	l.jr	r30
	 l.nop
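	/*
	 * The switch-over in C-like terms (a sketch):
	 *
	 *	ESR  = SR | SPR_SR_DME | SPR_SR_IME;	// MMU on after rfe
	 *	EPCR = &secondary_start;		// a virtual address
	 *	rfe();					// SR <- ESR, PC <- EPCR
	 */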
/* ========================================[ cache ]=== */

	/* alignment here so we don't change memory offsets with
	 * memory controller defined
	 */
	/* Check if IC present and skip enabling otherwise */
	l.mfspr	r24,r0,SPR_UPR
	l.andi	r26,r24,SPR_UPR_ICP

	l.xori	r5,r5,SPR_SR_ICE
	/* Establish cache block size
	   r14 contains block size
	*/
	l.mfspr	r24,r0,SPR_ICCFGR
	l.andi	r26,r24,SPR_ICCFGR_CBS

	/* Establish number of cache sets
	   r16 contains number of cache sets
	   r28 contains log(# of cache sets)
	*/
	l.andi	r26,r24,SPR_ICCFGR_NCS
//	l.addi	r5,r0,IC_SIZE
	l.mtspr	r0,r6,SPR_ICBIR
//	l.addi	r6,r6,IC_LINE
	l.ori	r6,r6,SPR_SR_ICE
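	/*
	 * The sequence above amounts to (a sketch; field layouts per the
	 * OpenRISC spec, hedged as an illustration):
	 *
	 *	block = (ICCFGR & SPR_ICCFGR_CBS) ? 32 : 16;
	 *	nsets = 1 << ((ICCFGR & SPR_ICCFGR_NCS) >> 3);
	 *	for (addr = 0; addr < nsets * block; addr += block)
	 *		mtspr(SPR_ICBIR, addr);	// invalidate each line
	 *	SR |= SPR_SR_ICE;		// then enable the cache
	 */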
	/* Check if DC present and skip enabling otherwise */
	l.mfspr	r24,r0,SPR_UPR
	l.andi	r26,r24,SPR_UPR_DCP

	l.xori	r5,r5,SPR_SR_DCE
	/* Establish cache block size
	   r14 contains block size
	*/
	l.mfspr	r24,r0,SPR_DCCFGR
	l.andi	r26,r24,SPR_DCCFGR_CBS

	/* Establish number of cache sets
	   r16 contains number of cache sets
	   r28 contains log(# of cache sets)
	*/
	l.andi	r26,r24,SPR_DCCFGR_NCS
	l.mtspr	r0,r6,SPR_DCBIR

	l.ori	r6,r6,SPR_SR_DCE
/* ===============================================[ page table masks ]=== */

#define DTLB_UP_CONVERT_MASK	0x3fa
#define ITLB_UP_CONVERT_MASK	0x3a

/* for SMP we'd have (this is a bit subtle, CC must be always set
 * for SMP, but since we have _PAGE_PRESENT bit always defined
 * we can just modify the mask)
 */
#define DTLB_SMP_CONVERT_MASK	0x3fb
#define ITLB_SMP_CONVERT_MASK	0x3b
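/*
 * How a convert mask is applied (a sketch in C terms): only the PPN
 * plus the Linux pte bits that map 1:1 onto TLB translate-register
 * bits survive, e.g.
 *
 *	dtlbtr = pte & (PAGE_MASK | DTLB_UP_CONVERT_MASK);
 */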
/* ---[ boot dtlb miss handler ]----------------------------------------- */

boot_dtlb_miss_handler:

/* mask for DTLB_MR register: - (0) sets V (valid) bit,
 *                            - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_MR_MASK	0xfffff001

/* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
 *                            - (4) sets A (access) bit,
 *                            - (5) sets D (dirty) bit,
 *                            - (8) sets SRE (superuser read) bit
 *                            - (9) sets SWE (superuser write) bit
 *                            - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_TR_MASK	0xfffff332

/* These are for masking out the VPN/PPN value from the MR/TR registers...
 * it's not the same as the PFN */
#define VPN_MASK	0xfffff000
#define PPN_MASK	0xfffff000
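/*
 * What the boot handler below installs, in C terms (a sketch):
 *
 *	set = (ea >> 13) & (nsets - 1);		// 8k page-sized sets
 *	mtspr(SPR_DTLBMR_BASE(0) + set, (ea & VPN_MASK) | 0x001);
 *	mtspr(SPR_DTLBTR_BASE(0) + set, (pa & PPN_MASK) | 0x332);
 *
 * where pa == ea for EA <= 0xbfffffff and pa == tophys(ea) above that.
 */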
	l.mfspr	r6,r0,SPR_ESR_BASE
	l.andi	r6,r6,SPR_SR_SM			// are we in kernel mode ?
	l.sfeqi	r6,0				// r6 == 0x1 --> SM
	l.bf	exit_with_no_dtranslation
	 l.nop

	/* this could be optimized by moving storing of
	 * non r6 registers here, and jumping r6 restore
	 * if not in supervisor mode
	 */

	l.mfspr	r4,r0,SPR_EEAR_BASE		// get the offending EA
immediate_translation:
	l.srli	r3,r4,0xd	// r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb))

	l.mfspr	r6, r0, SPR_DMMUCFGR
	l.andi	r6, r6, SPR_DMMUCFGR_NTS
	l.srli	r6, r6, SPR_DMMUCFGR_NTS_OFF
	l.ori	r5, r0, 0x1
	l.sll	r5, r5, r6	// r5 = number DMMU sets
	l.addi	r6, r5, -1	// r6 = nsets mask
	l.and	r2, r3, r6	// r2 <- r3 % NSETS_MASK

	l.or	r6,r6,r4	// r6 <- r4
	l.ori	r6,r6,~(VPN_MASK)	// r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi	r5,hi(DTLB_MR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(DTLB_MR_MASK)	// r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
	l.and	r5,r5,r6	// r5 <- VPN :VPN .x001 - we have DTLBMR entry
	l.mtspr	r2,r5,SPR_DTLBMR_BASE(0)	// set DTLBMR
	/* set up DTLB with no translation for EA <= 0xbfffffff */
	LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
	l.sfgeu	r6,r4		// flag if r6 >= r4 (if 0xbfffffff >= EA)
	l.bf	1f		// goto out
	 l.and	r3,r4,r4	// delay slot :: r3 <- r4 (if flag==1)

	tophys(r3,r4)		// r3 <- PA
1:
	l.ori	r3,r3,~(PPN_MASK)	// r3 <- PPN :PPN .xfff - clear up lo(r3) to 0x**** *fff
	l.movhi	r5,hi(DTLB_TR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(DTLB_TR_MASK)	// r5 <- ffff:1111.x330 - apply DTLB_TR_MASK
	l.and	r5,r5,r3	// r5 <- PPN :PPN .x330 - we have DTLBTR entry
	l.mtspr	r2,r5,SPR_DTLBTR_BASE(0)	// set DTLBTR

	l.rfe			// SR <- ESR, PC <- EPC
exit_with_no_dtranslation:
	/* EA out of memory or not in supervisor mode */
	l.j	_dispatch_bus_fault
	 l.nop
/* ---[ boot itlb miss handler ]----------------------------------------- */

boot_itlb_miss_handler:

/* mask for ITLB_MR register: - sets V (valid) bit,
 *                            - sets bits belonging to VPN (15-12)
 */
#define ITLB_MR_MASK	0xfffff001

/* mask for ITLB_TR register: - sets A (access) bit,
 *                            - sets SXE (superuser execute) bit
 *                            - sets bits belonging to VPN (15-12)
 */
#define ITLB_TR_MASK	0xfffff050

#define VPN_MASK	0xffffe000
#define PPN_MASK	0xffffe000
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	EXCEPTION_STORE_GPR5
	EXCEPTION_STORE_GPR6

	l.mfspr	r6,r0,SPR_ESR_BASE
	l.andi	r6,r6,SPR_SR_SM			// are we in kernel mode ?
	l.sfeqi	r6,0				// r6 == 0x1 --> SM
	l.bf	exit_with_no_itranslation
	 l.nop

	l.mfspr	r4,r0,SPR_EEAR_BASE		// get the offending EA
	l.srli	r3,r4,0xd	// r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb))

	l.mfspr	r6, r0, SPR_IMMUCFGR
	l.andi	r6, r6, SPR_IMMUCFGR_NTS
	l.srli	r6, r6, SPR_IMMUCFGR_NTS_OFF
	l.ori	r5, r0, 0x1
	l.sll	r5, r5, r6	// r5 = number IMMU sets from IMMUCFGR
	l.addi	r6, r5, -1	// r6 = nsets mask
	l.and	r2, r3, r6	// r2 <- r3 % NSETS_MASK

	l.or	r6,r6,r4	// r6 <- r4
	l.ori	r6,r6,~(VPN_MASK)	// r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi	r5,hi(ITLB_MR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(ITLB_MR_MASK)	// r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
	l.and	r5,r5,r6	// r5 <- VPN :VPN .x001 - we have ITLBMR entry
	l.mtspr	r2,r5,SPR_ITLBMR_BASE(0)	// set ITLBMR
	/*
	 * set up ITLB with no translation for EA <= 0x0fffffff
	 *
	 * we need this for head.S mapping (EA = PA). if we move all functions
	 * which run with mmu enabled into entry.S, we might be able to eliminate this.
	 */
	LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
	l.sfgeu	r6,r4		// flag if r6 >= r4 (if 0x0fffffff >= EA)
	l.bf	1f		// goto out
	 l.and	r3,r4,r4	// delay slot :: r3 <- r4 (if flag==1)

	tophys(r3,r4)		// r3 <- PA
1:
	l.ori	r3,r3,~(PPN_MASK)	// r3 <- PPN :PPN .xfff - clear up lo(r3) to 0x**** *fff
	l.movhi	r5,hi(ITLB_TR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(ITLB_TR_MASK)	// r5 <- ffff:1111.x050 - apply ITLB_TR_MASK
	l.and	r5,r5,r3	// r5 <- PPN :PPN .x050 - we have ITLBTR entry
	l.mtspr	r2,r5,SPR_ITLBTR_BASE(0)	// set ITLBTR

	l.rfe			// SR <- ESR, PC <- EPC
exit_with_no_itranslation:
	l.j	_dispatch_bus_fault
	 l.nop
/* ====================================================================== */
/*
 * Stuff below here shouldn't go into .head section... maybe this stuff
 * can be moved to entry.S ???
 */

/* ==============================================[ DTLB miss handler ]=== */
/*
 * Exception handlers are entered with MMU off so the following handler
 * needs to use physical addressing
 */
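/*
 * In C terms, dtlb_miss_handler below performs roughly (a sketch
 * using generic page-table helpers for illustration):
 *
 *	pmd = (pmd_t *)(current_pgd[coreid] + pgd_index(ea));
 *	if (pmd_none(*pmd))
 *		goto page_fault;
 *	pte = *pte_offset(pmd, ea);
 *	if (!pte_present(pte))
 *		goto page_fault;
 *	set = (ea >> PAGE_SHIFT) & (dmmu_nsets - 1);
 *	DTLBTR[set] = pte & (PAGE_MASK | DTLB_UP_CONVERT_MASK);
 *	DTLBMR[set] = (ea & PAGE_MASK) | 0x1;	// valid bit
 */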
ENTRY(dtlb_miss_handler)
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	/*
	 * get EA of the miss
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE
	/*
	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
	 */
	GET_CURRENT_PGD(r3,r4)		// r3 is current_pgd, r4 is temp
	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
	l.slli	r4,r4,0x2		// to get address << 2
	l.add	r3,r4,r3		// r4 is pgd_index(daddr)
	/*
	 * if (pmd_none(*pmd))
	 *	goto pmd_none;
	 */
	tophys	(r4,r3)
	l.lwz	r3,0x0(r4)		// get *pmd value
	l.sfne	r3,r0
	l.bnf	d_pmd_none
	 l.addi	r3,r0,0xffffe000	// PAGE_MASK
	/*
	 * pte = *pte_offset(pmd, daddr);
	 */
	l.lwz	r4,0x0(r4)		// get **pmd value
	l.and	r4,r4,r3		// & PAGE_MASK
	l.srli	r2,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
	l.andi	r3,r2,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
	l.slli	r3,r3,0x2		// to get address << 2
	l.add	r3,r3,r4
	l.lwz	r3,0x0(r3)		// this is pte at last
	/*
	 * if (!pte_present(pte))
	 *	goto d_pte_not_present;
	 */
	l.andi	r4,r3,0x1
	l.sfne	r4,r0			// is pte present
	l.bnf	d_pte_not_present
	 l.addi	r4,r0,0xffffe3fa	// PAGE_MASK | DTLB_UP_CONVERT_MASK
	/*
	 * fill DTLB TR register
	 */
	l.and	r4,r3,r4		// apply the mask
	// Determine number of DMMU sets
	l.mfspr	r2, r0, SPR_DMMUCFGR
	l.andi	r2, r2, SPR_DMMUCFGR_NTS
	l.srli	r2, r2, SPR_DMMUCFGR_NTS_OFF
	l.ori	r3, r0, 0x1
	l.sll	r3, r3, r2		// r3 = number DMMU sets DMMUCFGR
	l.addi	r2, r3, -1		// r2 = nsets mask
	l.mfspr	r3, r0, SPR_EEAR_BASE
	l.srli	r3, r3, 0xd		// >> PAGE_SHIFT
	l.and	r2, r3, r2		// calc offset: & (NUM_TLB_ENTRIES-1)
	l.mtspr	r2,r4,SPR_DTLBTR_BASE(0)
	/*
	 * fill DTLB MR register
	 */
	l.slli	r3, r3, 0xd		/* << PAGE_SHIFT => EA & PAGE_MASK */
	l.ori	r4,r3,0x1		// set hardware valid bit: DTLBMR entry
	l.mtspr	r2,r4,SPR_DTLBMR_BASE(0)
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	l.rfe
d_pmd_none:
d_pte_not_present:
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler)
/* ==============================================[ ITLB miss handler ]=== */
ENTRY(itlb_miss_handler)
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	/*
	 * get EA of the miss
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE
	/*
	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
	 */
	GET_CURRENT_PGD(r3,r4)		// r3 is current_pgd, r4 is temp
	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
	l.slli	r4,r4,0x2		// to get address << 2
	l.add	r3,r4,r3		// r4 is pgd_index(daddr)
	/*
	 * if (pmd_none(*pmd))
	 *	goto pmd_none;
	 */
	tophys	(r4,r3)
	l.lwz	r3,0x0(r4)		// get *pmd value
	l.sfne	r3,r0
	l.bnf	i_pmd_none
	 l.addi	r3,r0,0xffffe000	// PAGE_MASK
	/*
	 * pte = *pte_offset(pmd, iaddr);
	 */
	l.lwz	r4,0x0(r4)		// get **pmd value
	l.and	r4,r4,r3		// & PAGE_MASK
	l.srli	r2,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
	l.andi	r3,r2,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
	l.slli	r3,r3,0x2		// to get address << 2
	l.add	r3,r3,r4
	l.lwz	r3,0x0(r3)		// this is pte at last
	/*
	 * if (!pte_present(pte))
	 *	goto i_pte_not_present;
	 */
	l.andi	r4,r3,0x1
	l.sfne	r4,r0			// is pte present
	l.bnf	i_pte_not_present
	 l.addi	r4,r0,0xffffe03a	// PAGE_MASK | ITLB_UP_CONVERT_MASK
	/*
	 * fill ITLB TR register
	 */
	l.and	r4,r3,r4		// apply the mask
	l.andi	r3,r3,0x7c0		// _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE | _PAGE_URE | _PAGE_UWE
	l.sfeq	r3,r0
	l.bf	itlb_tr_fill		//_workaround
	// Determine number of IMMU sets
	l.mfspr	r2, r0, SPR_IMMUCFGR
	l.andi	r2, r2, SPR_IMMUCFGR_NTS
	l.srli	r2, r2, SPR_IMMUCFGR_NTS_OFF
	l.ori	r3, r0, 0x1
	l.sll	r3, r3, r2		// r3 = number IMMU sets IMMUCFGR
	l.addi	r2, r3, -1		// r2 = nsets mask
	l.mfspr	r3, r0, SPR_EEAR_BASE
	l.srli	r3, r3, 0xd		// >> PAGE_SHIFT
	l.and	r2, r3, r2		// calc offset: & (NUM_TLB_ENTRIES-1)
	/*
	 * we should not just blindly set executable flags,
	 * but it does help with ping. the clean way would be to find out
	 * (and fix it) why stack doesn't have execution permissions
	 */

itlb_tr_fill_workaround:
	l.ori	r4,r4,0xc0		// | (SPR_ITLBTR_UXE | ITLBTR_SXE)
itlb_tr_fill:
	l.mtspr	r2,r4,SPR_ITLBTR_BASE(0)
	/*
	 * fill ITLB MR register
	 */
	l.slli	r3, r3, 0xd		/* << PAGE_SHIFT => EA & PAGE_MASK */
	l.ori	r4,r3,0x1		// set hardware valid bit: ITLBMR entry
	l.mtspr	r2,r4,SPR_ITLBMR_BASE(0)
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	l.rfe
i_pmd_none:
i_pte_not_present:
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_HANDLE(_itlb_miss_page_fault_handler)
/* ==============================================[ boot tlb handlers ]=== */

/* =================================================[ debugging aids ]=== */

	.align 64
_immu_trampoline:
	.space 64
_immu_trampoline_top:

#define TRAMP_SLOT_0		(0x0)
#define TRAMP_SLOT_1		(0x4)
#define TRAMP_SLOT_2		(0x8)
#define TRAMP_SLOT_3		(0xc)
#define TRAMP_SLOT_4		(0x10)
#define TRAMP_SLOT_5		(0x14)
#define TRAMP_FRAME_SIZE	(0x18)
ENTRY(_immu_trampoline_workaround)
	// r2 EEA
	// r6 is physical EEA
	tophys(r6,r2)

	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
	tophys	(r3,r5)			// r3 is trampoline (physical)

	LOAD_SYMBOL_2_GPR(r4,0x15000000)
	l.sw	TRAMP_SLOT_0(r3),r4
	l.sw	TRAMP_SLOT_1(r3),r4
	l.sw	TRAMP_SLOT_4(r3),r4
	l.sw	TRAMP_SLOT_5(r3),r4

	l.lwz	r4,0x0(r6)		// load op @ EEA + 0x0 (fc address)
	l.sw	TRAMP_SLOT_3(r3),r4	// store it to _immu_trampoline_data
	l.lwz	r4,-0x4(r6)		// load op @ EEA - 0x4 (f8 address)
	l.sw	TRAMP_SLOT_2(r3),r4	// store it to _immu_trampoline_data

	l.srli	r5,r4,26		// check opcode for write access
	l.sfeqi	r5,0x11			// l.jr
	l.sfeqi	r5,1			// l.jal
	l.sfeqi	r5,0x12			// l.jalr
	l.sfeqi	r5,3			// l.bnf
	l.sfeqi	r5,4			// l.bf
99:
	l.j	99b			// should never happen
	 l.nop
	// r3 is trampoline address (physical)
	// r4 is instruction
	// r6 is physical(EEA)

	/* 19 20 aa aa	l.movhi r9,0xaaaa
	 * a9 29 bb bb	l.ori	r9,0xbbbb
	 *
	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
	 */

	l.addi	r6,r2,0x4		// this is 0xaaaabbbb

	// l.movhi r9,0xaaaa
	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
	l.srli	r5,r6,16
	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5

	// l.ori r9,0xbbbb
	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
	l.andi	r5,r6,0xffff
	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5

	/* fallthrough, need to set up new jump offset */

	l.slli	r6,r4,6			// original offset shifted left 6 - 2
//	l.srli	r6,r6,6			// original offset shifted right 2

	l.slli	r4,r2,4			// old jump position: EEA shifted left 4
//	l.srli	r4,r4,6			// old jump position: shifted right 2

	l.addi	r5,r3,0xc		// new jump position (physical)
	l.slli	r5,r5,4			// new jump position: shifted left 4

	// calculate new jump offset
	// new_off = old_off + (old_jump - new_jump)

	l.sub	r5,r4,r5		// old_jump - new_jump
	l.add	r5,r6,r5		// orig_off + (old_jump - new_jump)
	l.srli	r5,r5,6			// new offset shifted right 2

	// r5 is new jump offset
	// l.j has opcode 0x0...
	l.sw	TRAMP_SLOT_2(r3),r5	// write it back
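	/*
	 * The offset fixup above in C terms (a sketch; the branch
	 * immediate counts instruction words, and the left/right shift
	 * pairs keep the 26-bit field sign-extended):
	 *
	 *	new_off = old_off + (old_pc - new_pc) / 4;
	 */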
	/* ----------------------------- */

	/* 19 20 aa aa	l.movhi r9,0xaaaa
	 * a9 29 bb bb	l.ori	r9,0xbbbb
	 *
	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
	 */

	l.addi	r6,r2,0x4		// this is 0xaaaabbbb

	// l.movhi r9,0xaaaa
	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
	l.srli	r5,r6,16
	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5

	// l.ori r9,0xbbbb
	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
	l.andi	r5,r6,0xffff
	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5

	l.lhz	r5,(TRAMP_SLOT_2+0x0)(r3)	// load hi part of jump instruction
	l.andi	r5,r5,0x3ff			// clear out opcode part
	l.ori	r5,r5,0x4400			// opcode changed from l.jalr -> l.jr
	l.sh	(TRAMP_SLOT_2+0x0)(r3),r5	// write it back
	/* ----------------------------- */

	l.slli	r6,r4,6			// original offset shifted left 6 - 2
//	l.srli	r6,r6,6			// original offset shifted right 2

	l.slli	r4,r2,4			// old jump position: EEA shifted left 4
//	l.srli	r4,r4,6			// old jump position: shifted right 2

	l.addi	r5,r3,0xc		// new jump position (physical)
	l.slli	r5,r5,4			// new jump position: shifted left 4

	// calculate new jump offset
	// new_off = old_off + (old_jump - new_jump)

	l.add	r6,r6,r4		// (orig_off + old_jump)
	l.sub	r6,r6,r5		// (orig_off + old_jump) - new_jump
	l.srli	r6,r6,6			// new offset shifted right 2

	// r6 is new jump offset
	l.lwz	r4,(TRAMP_SLOT_2+0x0)(r3)	// load jump instruction
	l.srli	r4,r4,16
	l.andi	r4,r4,0xfc00		// get opcode part
	l.slli	r4,r4,16
	l.or	r6,r4,r6		// l.b(n)f new offset
	l.sw	TRAMP_SLOT_2(r3),r6	// write it back

	/* we need to add l.j to EEA + 0x8 */
	tophys	(r4,r2)			// may not be needed (due to shifts down)
	l.addi	r4,r4,(0x8 - 0x8)	// jump target = r2 + 0x8 (compensate for 0x8)
					// jump position = r5 + 0x8 (0x8 compensated)
	l.sub	r4,r4,r5		// jump offset = target - new_position + 0x8

	l.slli	r4,r4,4			// the amount of info in immediate of jump
	l.srli	r4,r4,6			// jump instruction with offset
	l.sw	TRAMP_SLOT_4(r3),r4	// write it to 4th slot
	// set up new EPC to point to our trampoline code
	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
	l.mtspr	r0,r5,SPR_EPCR_BASE

	// immu_trampoline is (4x) CACHE_LINE aligned
	// and only 6 instructions long,
	// so we need to invalidate only 2 lines

	/* Establish cache block size
	   r14 contains block size
	*/
	l.mfspr	r21,r0,SPR_ICCFGR
	l.andi	r21,r21,SPR_ICCFGR_CBS

	l.mtspr	r0,r5,SPR_ICBIR
	l.add	r5,r5,r14
	l.mtspr	r0,r5,SPR_ICBIR
/*
 * DSCR: prints a string referenced by r3.
 *
 * PRMS: r3	- address of the first character of null
 *		  terminated string to be printed
 *
 * PREQ: UART at UART_BASE_ADD has to be initialized
 *
 * POST: caller should be aware that r3, r9 are changed
 */
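/*
 * Typical use, as in UNHANDLED_EXCEPTION() above (note the delay
 * slot; the string must live low enough to be addressable via lo()):
 *
 *	l.jal	_emergency_print
 *	 l.ori	r3,r0,lo(_string_nl)
 */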
ENTRY(_emergency_print)
	EMERGENCY_PRINT_STORE_GPR4
	EMERGENCY_PRINT_STORE_GPR5
	EMERGENCY_PRINT_STORE_GPR6
	EMERGENCY_PRINT_STORE_GPR7

	l.movhi	r4,hi(UART_BASE_ADD)

	/* next character */

	EMERGENCY_PRINT_LOAD_GPR7
	EMERGENCY_PRINT_LOAD_GPR6
	EMERGENCY_PRINT_LOAD_GPR5
	EMERGENCY_PRINT_LOAD_GPR4
ENTRY(_emergency_print_nr)
	EMERGENCY_PRINT_STORE_GPR4
	EMERGENCY_PRINT_STORE_GPR5
	EMERGENCY_PRINT_STORE_GPR6
	EMERGENCY_PRINT_STORE_GPR7
	EMERGENCY_PRINT_STORE_GPR8

	l.addi	r8,r0,32		// shift register

1:	/* remove leading zeros */

	/* don't skip the last zero if number == 0x0 */

	l.movhi	r4,hi(UART_BASE_ADD)

	/* next character */

	EMERGENCY_PRINT_LOAD_GPR8
	EMERGENCY_PRINT_LOAD_GPR7
	EMERGENCY_PRINT_LOAD_GPR6
	EMERGENCY_PRINT_LOAD_GPR5
	EMERGENCY_PRINT_LOAD_GPR4
/*
 * This should be used for debugging only.
 * It messes up the Linux early serial output
 * somehow, so use it sparingly and essentially
 * only if you need to debug something that goes wrong
 * before Linux gets the early serial going.
 *
 * Furthermore, you'll have to make sure you set the
 * UART_DIVISOR correctly according to the system
 * clock rate.
 */

#define SYS_CLK		20000000
//#define SYS_CLK	1843200
#define OR32_CONSOLE_BAUD	115200
#define UART_DIVISOR	SYS_CLK/(16*OR32_CONSOLE_BAUD)
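/*
 * With the defaults above: UART_DIVISOR = 20000000 / (16 * 115200)
 * = 10 by integer division (the exact quotient is ~10.85, so the
 * realized rate is 20000000 / (16 * 10) = 125000 baud, about 8.5%
 * fast - close enough for debug output).
 */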
ENTRY(_early_uart_init)
	l.movhi	r3,hi(UART_BASE_ADD)

	l.addi	r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
	l.sb	UART_DLM(r3),r4
	l.addi	r4,r0,((UART_DIVISOR) & 0x000000ff)
	l.sb	UART_DLL(r3),r4
	.global _secondary_evbar
_secondary_evbar:

	/* Just disable interrupts and Return */
	l.ori	r3,r0,SPR_SR_SM
	l.mtspr	r0,r3,SPR_ESR_BASE
	l.rfe
_string_unhandled_exception:
	.string "\n\rRunarunaround: Unhandled exception 0x\0"
_string_epc_prefix:
	.string ": EPC=0x\0"
_string_nl:
	.string "\n\r\0"
/* ========================================[ page aligned structures ]=== */

/*
 * .data section should be page aligned
 *	(look into arch/openrisc/kernel/vmlinux.lds.S)
 */
	.global empty_zero_page
empty_zero_page:
	.space	8192

	.global swapper_pg_dir
swapper_pg_dir:
	.space	8192

	.global	_unhandled_stack
_unhandled_stack:
	.space	8192
_unhandled_stack_top:

/* ============================================================[ EOF ]=== */