 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/serial_reg.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/spr_defs.h>
#include <asm/asm-offsets.h>
#include <linux/of_fdt.h>
#define tophys(rd,rs)				\
	l.movhi	rd,hi(-KERNELBASE)		;\
	l.add	rd,rd,rs

#define CLEAR_GPR(gpr)				\
	l.movhi	gpr,0x0
#define LOAD_SYMBOL_2_GPR(gpr,symbol)		\
	l.movhi	gpr,hi(symbol)			;\
	l.ori	gpr,gpr,lo(symbol)
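/*
 * Illustration only (not part of the build): with KERNELBASE at the usual
 * OpenRISC PAGE_OFFSET of 0xc0000000 (an assumption here), tophys() turns
 * a kernel virtual address into a physical one by adding -KERNELBASE:
 *
 *	phys = virt + (u32)-0xc0000000;		// 0xc0001234 -> 0x00001234
 *
 * LOAD_SYMBOL_2_GPR() is the usual movhi/ori pair for materializing a full
 * 32-bit constant, since OpenRISC immediates are only 16 bits wide.
 */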
#define UART_BASE_ADD	0x90000000

#define EXCEPTION_SR	(SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)
#define SYSCALL_SR	(SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)

/* ============================================[ tmp store locations ]=== */

#define SPR_SHADOW_GPR(x)	((x) + SPR_GPR_BASE + 32)
/*
 * emergency_print temporary stores
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EMERGENCY_PRINT_STORE_GPR4	l.mtspr r0,r4,SPR_SHADOW_GPR(14)
#define EMERGENCY_PRINT_LOAD_GPR4	l.mfspr r4,r0,SPR_SHADOW_GPR(14)

#define EMERGENCY_PRINT_STORE_GPR5	l.mtspr r0,r5,SPR_SHADOW_GPR(15)
#define EMERGENCY_PRINT_LOAD_GPR5	l.mfspr r5,r0,SPR_SHADOW_GPR(15)

#define EMERGENCY_PRINT_STORE_GPR6	l.mtspr r0,r6,SPR_SHADOW_GPR(16)
#define EMERGENCY_PRINT_LOAD_GPR6	l.mfspr r6,r0,SPR_SHADOW_GPR(16)

#define EMERGENCY_PRINT_STORE_GPR7	l.mtspr r0,r7,SPR_SHADOW_GPR(7)
#define EMERGENCY_PRINT_LOAD_GPR7	l.mfspr r7,r0,SPR_SHADOW_GPR(7)

#define EMERGENCY_PRINT_STORE_GPR8	l.mtspr r0,r8,SPR_SHADOW_GPR(8)
#define EMERGENCY_PRINT_LOAD_GPR8	l.mfspr r8,r0,SPR_SHADOW_GPR(8)

#define EMERGENCY_PRINT_STORE_GPR9	l.mtspr r0,r9,SPR_SHADOW_GPR(9)
#define EMERGENCY_PRINT_LOAD_GPR9	l.mfspr r9,r0,SPR_SHADOW_GPR(9)

#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EMERGENCY_PRINT_STORE_GPR4	l.sw    0x20(r0),r4
#define EMERGENCY_PRINT_LOAD_GPR4	l.lwz   r4,0x20(r0)

#define EMERGENCY_PRINT_STORE_GPR5	l.sw    0x24(r0),r5
#define EMERGENCY_PRINT_LOAD_GPR5	l.lwz   r5,0x24(r0)

#define EMERGENCY_PRINT_STORE_GPR6	l.sw    0x28(r0),r6
#define EMERGENCY_PRINT_LOAD_GPR6	l.lwz   r6,0x28(r0)

#define EMERGENCY_PRINT_STORE_GPR7	l.sw    0x2c(r0),r7
#define EMERGENCY_PRINT_LOAD_GPR7	l.lwz   r7,0x2c(r0)

#define EMERGENCY_PRINT_STORE_GPR8	l.sw    0x30(r0),r8
#define EMERGENCY_PRINT_LOAD_GPR8	l.lwz   r8,0x30(r0)

#define EMERGENCY_PRINT_STORE_GPR9	l.sw    0x34(r0),r9
#define EMERGENCY_PRINT_LOAD_GPR9	l.lwz   r9,0x34(r0)

#endif
/*
 * TLB miss handlers temporary stores
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EXCEPTION_STORE_GPR2		l.mtspr r0,r2,SPR_SHADOW_GPR(2)
#define EXCEPTION_LOAD_GPR2		l.mfspr r2,r0,SPR_SHADOW_GPR(2)

#define EXCEPTION_STORE_GPR3		l.mtspr r0,r3,SPR_SHADOW_GPR(3)
#define EXCEPTION_LOAD_GPR3		l.mfspr r3,r0,SPR_SHADOW_GPR(3)

#define EXCEPTION_STORE_GPR4		l.mtspr r0,r4,SPR_SHADOW_GPR(4)
#define EXCEPTION_LOAD_GPR4		l.mfspr r4,r0,SPR_SHADOW_GPR(4)

#define EXCEPTION_STORE_GPR5		l.mtspr r0,r5,SPR_SHADOW_GPR(5)
#define EXCEPTION_LOAD_GPR5		l.mfspr r5,r0,SPR_SHADOW_GPR(5)

#define EXCEPTION_STORE_GPR6		l.mtspr r0,r6,SPR_SHADOW_GPR(6)
#define EXCEPTION_LOAD_GPR6		l.mfspr r6,r0,SPR_SHADOW_GPR(6)

#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EXCEPTION_STORE_GPR2		l.sw    0x64(r0),r2
#define EXCEPTION_LOAD_GPR2		l.lwz   r2,0x64(r0)

#define EXCEPTION_STORE_GPR3		l.sw    0x68(r0),r3
#define EXCEPTION_LOAD_GPR3		l.lwz   r3,0x68(r0)

#define EXCEPTION_STORE_GPR4		l.sw    0x6c(r0),r4
#define EXCEPTION_LOAD_GPR4		l.lwz   r4,0x6c(r0)

#define EXCEPTION_STORE_GPR5		l.sw    0x70(r0),r5
#define EXCEPTION_LOAD_GPR5		l.lwz   r5,0x70(r0)

#define EXCEPTION_STORE_GPR6		l.sw    0x74(r0),r6
#define EXCEPTION_LOAD_GPR6		l.lwz   r6,0x74(r0)

#endif
/*
 * EXCEPTION_HANDLE temporary stores
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EXCEPTION_T_STORE_GPR30		l.mtspr r0,r30,SPR_SHADOW_GPR(30)
#define EXCEPTION_T_LOAD_GPR30(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(30)

#define EXCEPTION_T_STORE_GPR10		l.mtspr r0,r10,SPR_SHADOW_GPR(10)
#define EXCEPTION_T_LOAD_GPR10(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(10)

#define EXCEPTION_T_STORE_SP		l.mtspr r0,r1,SPR_SHADOW_GPR(1)
#define EXCEPTION_T_LOAD_SP(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(1)

#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EXCEPTION_T_STORE_GPR30		l.sw    0x78(r0),r30
#define EXCEPTION_T_LOAD_GPR30(reg)	l.lwz   reg,0x78(r0)

#define EXCEPTION_T_STORE_GPR10		l.sw    0x7c(r0),r10
#define EXCEPTION_T_LOAD_GPR10(reg)	l.lwz   reg,0x7c(r0)

#define EXCEPTION_T_STORE_SP		l.sw    0x80(r0),r1
#define EXCEPTION_T_LOAD_SP(reg)	l.lwz   reg,0x80(r0)

#endif

/* =========================================================[ macros ]=== */
#ifdef CONFIG_SMP
#define GET_CURRENT_PGD(reg,t1)					\
	LOAD_SYMBOL_2_GPR(reg,current_pgd)			;\
	l.mfspr	t1,r0,SPR_COREID				;\
	l.slli	t1,t1,2						;\
	l.add	reg,reg,t1					;\
	tophys	(t1,reg)					;\
	l.lwz	reg,0(t1)
#else
#define GET_CURRENT_PGD(reg,t1)					\
	LOAD_SYMBOL_2_GPR(reg,current_pgd)			;\
	tophys	(t1,reg)					;\
	l.lwz	reg,0(t1)
#endif

/* Load r10 from current_thread_info_set - clobbers r1 and r30 */
#ifdef CONFIG_SMP
#define GET_CURRENT_THREAD_INFO					\
	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
	tophys	(r30,r1)					;\
	l.mfspr	r10,r0,SPR_COREID				;\
	l.slli	r10,r10,2					;\
	l.add	r30,r30,r10					;\
	/* r10: current_thread_info */				;\
	l.lwz	r10,0(r30)
#else
#define GET_CURRENT_THREAD_INFO					\
	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
	tophys	(r30,r1)					;\
	/* r10: current_thread_info */				;\
	l.lwz	r10,0(r30)
#endif
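/*
 * Illustration only (not part of the build): a rough C equivalent of the
 * two lookups above, assuming current_pgd[] and current_thread_info_set[]
 * are per-CPU arrays indexed by SPR_COREID (the SMP case):
 *
 *	pgd = *(pgd_t **)tophys(&current_pgd[coreid]);
 *	ti  = *(struct thread_info **)tophys(&current_thread_info_set[coreid]);
 *
 * The tophys() step is needed because these macros run with the MMU off.
 */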
/*
 * DSCR: this is a common hook for handling exceptions. it will save
 *       the needed registers, set up stack and pointer to current
 *       then jump to the handler while enabling MMU
 *
 * PRMS: handler	- a function to jump to. it has to save the
 *			  remaining registers to kernel stack, call
 *			  appropriate arch-independent exception handler
 *			  and finally jump to ret_from_except
 *
 * PREQ: unchanged state from the time exception happened
 *
 * POST: SAVED the following registers original value
 *	 to the newly created exception frame pointed to by r1
 *
 *	 r1  - ksp	pointing to the new (exception) frame
 *	 r4  - EEAR	exception EA
 *	 r10 - current	pointing to current_thread_info struct
 *	 r12 - syscall	0, since we didn't come from syscall
 *	 r13 - temp	it actually contains new SR, not needed anymore
 *	 r31 - handler	address of the handler we'll jump to
 *
 *	 handler has to save remaining registers to the exception
 *	 ksp frame *before* tainting them!
 *
 * NOTE: this function is not reentrant per se. reentrancy is guaranteed
 *       by processor disabling all exceptions/interrupts when exception
 *	 accepted.
 *
 * OPTM: no need to make it so wasteful to extract ksp when in user mode
 */
#define EXCEPTION_HANDLE(handler)				\
	EXCEPTION_T_STORE_GPR30					;\
	l.mfspr	r30,r0,SPR_ESR_BASE				;\
	l.andi	r30,r30,SPR_SR_SM				;\
	l.sfeqi	r30,0						;\
	EXCEPTION_T_STORE_GPR10					;\
	l.bnf	2f			/* kernel_mode */	;\
	 EXCEPTION_T_STORE_SP		/* delay slot */	;\
1: /* user_mode:   */						;\
	GET_CURRENT_THREAD_INFO					;\
	tophys	(r30,r10)					;\
	l.lwz	r1,(TI_KSP)(r30)				;\
	/* fall through */					;\
2: /* kernel_mode: */						;\
	/* create new stack frame, save only needed gprs */	;\
	/* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */	;\
	/* r12: temp, syscall indicator */			;\
	l.addi	r1,r1,-(INT_FRAME_SIZE)				;\
	/* r1 is KSP, r30 is __pa(KSP) */			;\
	tophys	(r30,r1)					;\
	l.sw	PT_GPR12(r30),r12				;\
	l.mfspr	r12,r0,SPR_EPCR_BASE				;\
	l.sw	PT_PC(r30),r12					;\
	l.mfspr	r12,r0,SPR_ESR_BASE				;\
	l.sw	PT_SR(r30),r12					;\
	/* save r30 */						;\
	EXCEPTION_T_LOAD_GPR30(r12)				;\
	l.sw	PT_GPR30(r30),r12				;\
	/* save r10 as was prior to exception */		;\
	EXCEPTION_T_LOAD_GPR10(r12)				;\
	l.sw	PT_GPR10(r30),r12				;\
	/* save PT_SP as was prior to exception */		;\
	EXCEPTION_T_LOAD_SP(r12)				;\
	l.sw	PT_SP(r30),r12					;\
	/* save exception r4, set r4 = EA */			;\
	l.sw	PT_GPR4(r30),r4					;\
	l.mfspr	r4,r0,SPR_EEAR_BASE				;\
	/* r12 == 1 if we come from syscall */			;\
	CLEAR_GPR(r12)						;\
	/* ----- turn on MMU ----- */				;\
	l.ori	r30,r0,(EXCEPTION_SR)				;\
	l.mtspr	r0,r30,SPR_ESR_BASE				;\
	/* r30: EA address of handler */			;\
	LOAD_SYMBOL_2_GPR(r30,handler)				;\
	l.mtspr	r0,r30,SPR_EPCR_BASE				;\
	l.rfe
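/*
 * Illustration only (not part of the build): the flow of EXCEPTION_HANDLE
 * in C-like pseudocode (register roles as documented in DSCR above):
 *
 *	if (!(ESR & SPR_SR_SM))			// came from user mode:
 *		sp = current_thread_info->ksp;	// fetch kernel stack
 *	sp -= INT_FRAME_SIZE;			// new exception frame
 *	frame->pc = EPCR; frame->sr = ESR;	// plus r4/r10/r12/r30/sp
 *	r4 = EEAR; r12 = 0;			// EA; "not a syscall"
 *	ESR = EXCEPTION_SR; EPCR = handler;	// MMU comes on at l.rfe
 *	l_rfe();				// "jump" to handler
 */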
/*
 *	#ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
 *	#define UNHANDLED_EXCEPTION(handler)			\
 *		l.mtspr r0,r3,SPR_SR				;\
 *		l.movhi r3,hi(0xf0000100)			;\
 *		l.ori	r3,r3,lo(0xf0000100)			;\
 *	#endif
 */

/* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
 *       a bit more careful (if we have a PT_SP or current pointer
 *       corruption) and set them up from 'current_set'
 */
#define UNHANDLED_EXCEPTION(handler)				\
	EXCEPTION_T_STORE_GPR30					;\
	EXCEPTION_T_STORE_GPR10					;\
	EXCEPTION_T_STORE_SP					;\
	/* temporary store r3, r9 into r1, r10 */		;\
	l.addi	r1,r3,0x0					;\
	l.addi	r10,r9,0x0					;\
	/* the string referenced by r3 must be low enough */	;\
	l.jal	_emergency_print				;\
	 l.ori	r3,r0,lo(_string_unhandled_exception)		;\
	l.mfspr	r3,r0,SPR_NPC					;\
	l.jal	_emergency_print_nr				;\
	 l.andi	r3,r3,0x1f00					;\
	/* the string referenced by r3 must be low enough */	;\
	l.jal	_emergency_print				;\
	 l.ori	r3,r0,lo(_string_epc_prefix)			;\
	l.jal	_emergency_print_nr				;\
	 l.mfspr r3,r0,SPR_EPCR_BASE				;\
	l.jal	_emergency_print				;\
	 l.ori	r3,r0,lo(_string_nl)				;\
	/* end of printing */					;\
	l.addi	r3,r1,0x0					;\
	l.addi	r9,r10,0x0					;\
	/* extract current, ksp from current_set */		;\
	LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top)		;\
	LOAD_SYMBOL_2_GPR(r10,init_thread_union)		;\
	/* create new stack frame, save only needed gprs */	;\
	/* r1: KSP, r10: current, r31: __pa(KSP) */		;\
	/* r12: temp, syscall indicator, r13 temp */		;\
	l.addi	r1,r1,-(INT_FRAME_SIZE)				;\
	/* r1 is KSP, r30 is __pa(KSP) */			;\
	tophys	(r30,r1)					;\
	l.sw	PT_GPR12(r30),r12				;\
	l.mfspr	r12,r0,SPR_EPCR_BASE				;\
	l.sw	PT_PC(r30),r12					;\
	l.mfspr	r12,r0,SPR_ESR_BASE				;\
	l.sw	PT_SR(r30),r12					;\
	/* save r30 */						;\
	EXCEPTION_T_LOAD_GPR30(r12)				;\
	l.sw	PT_GPR30(r30),r12				;\
	/* save r10 as was prior to exception */		;\
	EXCEPTION_T_LOAD_GPR10(r12)				;\
	l.sw	PT_GPR10(r30),r12				;\
	/* save PT_SP as was prior to exception */		;\
	EXCEPTION_T_LOAD_SP(r12)				;\
	l.sw	PT_SP(r30),r12					;\
	l.sw	PT_GPR13(r30),r13				;\
	/* save exception r4, set r4 = EA */			;\
	l.sw	PT_GPR4(r30),r4					;\
	l.mfspr	r4,r0,SPR_EEAR_BASE				;\
	/* r12 == 1 if we come from syscall */			;\
	CLEAR_GPR(r12)						;\
	/* ----- play a MMU trick ----- */			;\
	l.ori	r30,r0,(EXCEPTION_SR)				;\
	l.mtspr	r0,r30,SPR_ESR_BASE				;\
	/* r31: EA address of handler */			;\
	LOAD_SYMBOL_2_GPR(r30,handler)				;\
	l.mtspr	r0,r30,SPR_EPCR_BASE				;\
	l.rfe
/* ====================================================[ exceptions ]=== */

/* ---[ 0x100: RESET exception ]----------------------------------------- */

	/* Jump to .init code at _start which lives in the .head section
	 * and will be discarded after boot.
	 */
	LOAD_SYMBOL_2_GPR(r15, _start)
	tophys	(r13,r15)			/* MMU disabled */
	l.jr	r13
	 l.nop
/* ---[ 0x200: BUS exception ]------------------------------------------- */

	EXCEPTION_HANDLE(_bus_fault_handler)

/* ---[ 0x300: Data Page Fault exception ]------------------------------- */

_dispatch_do_dpage_fault:
	// totally disable timer interrupt
//	l.mtspr	r0,r0,SPR_TTMR
//	DEBUG_TLB_PROBE(0x300)
//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
	EXCEPTION_HANDLE(_data_page_fault_handler)

/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */

_dispatch_do_ipage_fault:
	// totally disable timer interrupt
//	l.mtspr	r0,r0,SPR_TTMR
//	DEBUG_TLB_PROBE(0x400)
//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
	EXCEPTION_HANDLE(_insn_page_fault_handler)
/* ---[ 0x500: Timer exception ]----------------------------------------- */

	EXCEPTION_HANDLE(_timer_handler)

/* ---[ 0x600: Alignment exception ]------------------------------------- */

	EXCEPTION_HANDLE(_alignment_handler)

/* ---[ 0x700: Illegal insn exception ]---------------------------------- */

	EXCEPTION_HANDLE(_illegal_instruction_handler)

/* ---[ 0x800: External interrupt exception ]---------------------------- */

	EXCEPTION_HANDLE(_external_irq_handler)

/* ---[ 0x900: DTLB miss exception ]------------------------------------- */

	l.j	boot_dtlb_miss_handler
	 l.nop

/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */

	l.j	boot_itlb_miss_handler
	 l.nop

/* ---[ 0xb00: Range exception ]----------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0xb00)

/* ---[ 0xc00: Syscall exception ]--------------------------------------- */

	EXCEPTION_HANDLE(_sys_call_handler)

/* ---[ 0xd00: Floating point exception ]-------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0xd00)

/* ---[ 0xe00: Trap exception ]------------------------------------------ */

//	UNHANDLED_EXCEPTION(_vector_0xe00)
	EXCEPTION_HANDLE(_trap_handler)

/* ---[ 0xf00: Reserved exception ]-------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0xf00)

/* ---[ 0x1000: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1000)

/* ---[ 0x1100: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1100)

/* ---[ 0x1200: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1200)

/* ---[ 0x1300: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1300)

/* ---[ 0x1400: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1400)

/* ---[ 0x1500: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1500)

/* ---[ 0x1600: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1600)

/* ---[ 0x1700: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1700)

/* ---[ 0x1800: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1800)

/* ---[ 0x1900: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1900)

/* ---[ 0x1a00: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1a00)

/* ---[ 0x1b00: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1b00)

/* ---[ 0x1c00: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1c00)

/* ---[ 0x1d00: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1d00)

/* ---[ 0x1e00: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1e00)

/* ---[ 0x1f00: Reserved exception ]------------------------------------- */

	UNHANDLED_EXCEPTION(_vector_0x1f00)
/* ===================================================[ kernel start ]=== */

/* This early stuff belongs in HEAD, but some of the functions below definitely
 * don't...
 */

	__HEAD
	.global _start
_start:
	/* Init r0 to zero as per spec */
	CLEAR_GPR(r0)

	/* save kernel parameters */
	l.or	r25,r0,r3		/* pointer to fdt */

	/*
	 * ensure a deterministic start
	 */
	l.mfspr	r26,r0,SPR_COREID

	/*
	 * set up initial ksp and current
	 */
	/* setup kernel stack */
	LOAD_SYMBOL_2_GPR(r1,init_thread_union + THREAD_SIZE)
	LOAD_SYMBOL_2_GPR(r10,init_thread_union)	// setup current
	tophys	(r31,r10)
	l.sw	TI_KSP(r31),r1

	/*
	 * .data contains initialized data,
	 * .bss contains uninitialized data - clear it up
	 */
	LOAD_SYMBOL_2_GPR(r24, __bss_start)
	LOAD_SYMBOL_2_GPR(r26, _end)
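/*
 * Illustration only (not part of the build): the elided clearing loop is,
 * in C terms, a word-wide clear over physical addresses:
 *
 *	for (u32 *p = tophys(__bss_start); p < tophys(_end); p++)
 *		*p = 0;
 */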
	/* The MMU needs to be enabled before or32_early_setup is called */

enable_mmu:
	/*
	 * enable dmmu & immu,
	 * i.e. set SR[5] (DME) and SR[6] (IME)
	 */
	l.mfspr	r30,r0,SPR_SR
	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
	l.or	r30,r30,r28
	l.mtspr	r0,r30,SPR_SR

	// reset the simulation counters
	l.nop	5
	/* check fdt header magic word */
	l.lwz	r3,0(r25)		/* load magic from fdt into r3 */
	l.movhi	r4,hi(OF_DT_HEADER)
	l.ori	r4,r4,lo(OF_DT_HEADER)
	l.sfeq	r3,r4
	l.bf	_fdt_found
	 l.nop
	/* magic number mismatch, set fdt pointer to null */
	l.or	r25,r0,r0
_fdt_found:
	/* pass fdt pointer to or32_early_setup in r3 */
	l.or	r3,r0,r25
	LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
	l.jalr	r24
	 l.nop

	/*
	 * clear all GPRS to increase determinism
	 */

	/*
	 * jump to kernel entry (start_kernel)
	 */
	LOAD_SYMBOL_2_GPR(r30, start_kernel)
	l.jr	r30
	 l.nop
/*
 * I N V A L I D A T E   T L B   e n t r i e s
 */
	LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
	LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
	l.addi	r7,r0,128		/* Maximum number of sets */
secondary_wait:
	/* Doze the cpu until we are asked to run */
	/* If we don't have power management skip doze */
	l.mfspr	r25,r0,SPR_UPR
	l.andi	r25,r25,SPR_UPR_PMP
	l.sfeq	r25,r0
	l.bf	secondary_check_release
	 l.nop

	/* Setup special secondary exception handler */
	LOAD_SYMBOL_2_GPR(r3, _secondary_evbar)
	tophys(r25,r3)
	l.mtspr	r0,r25,SPR_EVBAR

	/* Enable Interrupts */
	l.mfspr	r25,r0,SPR_SR
	l.ori	r25,r25,SPR_SR_IEE
	l.mtspr	r0,r25,SPR_SR

	/* Unmask interrupts */
	l.mfspr	r25,r0,SPR_PICMR
	l.ori	r25,r25,0xffff
	l.mtspr	r0,r25,SPR_PICMR

	/* Doze */
	l.mfspr	r25,r0,SPR_PMR
	LOAD_SYMBOL_2_GPR(r3, SPR_PMR_DME)
	l.or	r25,r25,r3
	l.mtspr	r0,r25,SPR_PMR

	/* Wakeup - Restore exception handler */
	l.mtspr	r0,r0,SPR_EVBAR
secondary_check_release:
	/*
	 * Check if we actually got the release signal, if not go back to
	 * sleep.
	 */
	l.mfspr	r25,r0,SPR_COREID
	LOAD_SYMBOL_2_GPR(r3, secondary_release)
	tophys(r4, r3)
	l.lwz	r3,0(r4)
	l.sfeq	r25,r3
	l.bnf	secondary_wait
	 l.nop
	/* fall through to secondary_init */

secondary_init:
	/*
	 * set up initial ksp and current
	 */
	LOAD_SYMBOL_2_GPR(r10, secondary_thread_info)
	tophys	(r30,r10)
	l.lwz	r10,0(r30)
	l.addi	r1,r10,THREAD_SIZE
	l.mfspr	r30,r0,SPR_SR
	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
	l.or	r30,r30,r28
	/*
	 * This is a bit tricky, we need to switch over from physical addresses
	 * to virtual addresses on the fly.
	 * To do that, we first set up ESR with the IME and DME bits set.
	 * Then EPCR is set to secondary_start and then a l.rfe is issued to
	 * "jump" to that.
	 */
	l.mtspr	r0,r30,SPR_ESR_BASE
	LOAD_SYMBOL_2_GPR(r30, secondary_start)
	l.mtspr	r0,r30,SPR_EPCR_BASE
	l.rfe

secondary_start:
	LOAD_SYMBOL_2_GPR(r30, secondary_start_kernel)
	l.jr	r30
	 l.nop
/* ========================================[ cache ]=== */

	/* alignment here so we don't change memory offsets with
	 * memory controller defined
	 */
	.align	0x2000

_ic_enable:
	/* Check if IC present and skip enabling otherwise */
	l.mfspr	r24,r0,SPR_UPR
	l.andi	r26,r24,SPR_UPR_ICP
	l.sfeq	r26,r0
	l.bf	9f
	 l.nop

	/* Disable IC */
	l.mfspr	r6,r0,SPR_SR
	l.addi	r5,r0,-1
	l.xori	r5,r5,SPR_SR_ICE
	l.and	r5,r6,r5
	l.mtspr	r0,r5,SPR_SR

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contains block size
	*/
	l.mfspr	r24,r0,SPR_ICCFGR
	l.andi	r26,r24,SPR_ICCFGR_CBS
	l.srli	r28,r26,7
	l.ori	r30,r0,16
	l.sll	r14,r30,r28

	/* Establish number of cache sets
	   r16 contains number of cache sets
	   r28 contains log(# of cache sets)
	*/
	l.andi	r26,r24,SPR_ICCFGR_NCS
	l.srli	r28,r26,3
	l.ori	r30,r0,1
	l.sll	r16,r30,r28

	/* Invalidate IC */
//	l.addi	r5,r0,IC_SIZE
	l.mtspr	r0,r6,SPR_ICBIR
//	l.addi	r6,r6,IC_LINE

	/* Enable IC */
	l.ori	r6,r6,SPR_SR_ICE
_dc_enable:
	/* Check if DC present and skip enabling otherwise */
	l.mfspr	r24,r0,SPR_UPR
	l.andi	r26,r24,SPR_UPR_DCP
	l.sfeq	r26,r0
	l.bf	9f
	 l.nop

	/* Disable DC */
	l.mfspr	r6,r0,SPR_SR
	l.addi	r5,r0,-1
	l.xori	r5,r5,SPR_SR_DCE
	l.and	r5,r6,r5
	l.mtspr	r0,r5,SPR_SR

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contains block size
	*/
	l.mfspr	r24,r0,SPR_DCCFGR
	l.andi	r26,r24,SPR_DCCFGR_CBS
	l.srli	r28,r26,7
	l.ori	r30,r0,16
	l.sll	r14,r30,r28

	/* Establish number of cache sets
	   r16 contains number of cache sets
	   r28 contains log(# of cache sets)
	*/
	l.andi	r26,r24,SPR_DCCFGR_NCS
	l.srli	r28,r26,3
	l.ori	r30,r0,1
	l.sll	r16,r30,r28

	/* Invalidate DC */
	l.mtspr	r0,r6,SPR_DCBIR

	/* Enable DC */
	l.ori	r6,r6,SPR_SR_DCE
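/*
 * Illustration only (not part of the build): how the cache geometry is
 * decoded from the xCCFGR registers above, in C terms (field offsets per
 * the OR1K architecture spec):
 *
 *	block_size = 16 << ((cfgr & SPR_ICCFGR_CBS) >> 7);	// 16 or 32 bytes
 *	num_sets   = 1  << ((cfgr & SPR_ICCFGR_NCS) >> 3);	// power of two
 *
 * The invalidate loops then step an address register through
 * num_sets * block_size bytes, writing each block address to xCBIR.
 */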
/* ===============================================[ page table masks ]=== */

#define DTLB_UP_CONVERT_MASK	0x3fa
#define ITLB_UP_CONVERT_MASK	0x3a

/* for SMP we'd have (this is a bit subtle, CC must always be set
 * for SMP, but since we have _PAGE_PRESENT bit always defined
 * we can just modify the mask)
 */
#define DTLB_SMP_CONVERT_MASK	0x3fb
#define ITLB_SMP_CONVERT_MASK	0x3b
/* ---[ boot dtlb miss handler ]----------------------------------------- */

boot_dtlb_miss_handler:

/* mask for DTLB_MR register: - (0) sets V (valid) bit,
 *                            - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_MR_MASK	0xfffff001

/* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
 *                            - (4) sets A (access) bit,
 *                            - (5) sets D (dirty) bit,
 *                            - (8) sets SRE (superuser read) bit
 *                            - (9) sets SWE (superuser write) bit
 *                            - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_TR_MASK	0xfffff332

/* These are for masking out the VPN/PPN value from the MR/TR registers...
 * it's not the same as the PFN */
#define VPN_MASK	0xfffff000
#define PPN_MASK	0xfffff000
	l.mfspr	r6,r0,SPR_ESR_BASE
	l.andi	r6,r6,SPR_SR_SM		// are we in kernel mode ?
	l.sfeqi	r6,0			// flag set if SM == 0, i.e. user mode
	l.bf	exit_with_no_dtranslation

	/* this could be optimized by moving storing of
	 * non r6 registers here, and jumping r6 restore
	 * if not in supervisor mode
	 */

	/* user_mode: */

	l.mfspr	r4,r0,SPR_EEAR_BASE	// get the offending EA
immediate_translation:
	l.srli	r3,r4,0xd		// r3 <- r4 / 8192 (sets are relative to page size (8Kb), NOT VPN size (4Kb))

	l.mfspr	r6, r0, SPR_DMMUCFGR
	l.andi	r6, r6, SPR_DMMUCFGR_NTS
	l.srli	r6, r6, SPR_DMMUCFGR_NTS_OFF
	l.ori	r5, r0, 0x1
	l.sll	r5, r5, r6		// r5 = number DMMU sets
	l.addi	r6, r5, -1		// r6 = nsets mask
	l.and	r2, r3, r6		// r2 <- r3 % NSETS_MASK

	l.or	r6,r6,r4		// r6 <- r4
	l.ori	r6,r6,~(VPN_MASK)	// r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi	r5,hi(DTLB_MR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(DTLB_MR_MASK)	// r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
	l.and	r5,r5,r6		// r5 <- VPN :VPN .x001 - we have DTLBMR entry
	l.mtspr	r2,r5,SPR_DTLBMR_BASE(0) // set DTLBMR
	/* set up DTLB with no translation for EA <= 0xbfffffff */
	LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
	l.sfgeu	r6,r4			// flag if r6 >= r4 (if 0xbfffffff >= EA)
	l.bf	1f			// goto out
	 l.and	r3,r4,r4		// delay slot :: r3 <- r4 (if flag==1)

	tophys(r3,r4)			// r3 <- PA
1:
	l.ori	r3,r3,~(PPN_MASK)	// r3 <- PPN :PPN .xfff - clear up lo(r3) to 0x**** *fff
	l.movhi	r5,hi(DTLB_TR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(DTLB_TR_MASK)	// r5 <- ffff:1111.x330 - apply DTLB_TR_MASK
	l.and	r5,r5,r3		// r5 <- PPN :PPN .x330 - we have DTLBTR entry
	l.mtspr	r2,r5,SPR_DTLBTR_BASE(0) // set DTLBTR

	l.rfe				// SR <- ESR, PC <- EPC
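/*
 * Illustration only (not part of the build): the boot-time DTLB fill above,
 * written out in C (8 KiB pages, so the set index comes from EA >> 13):
 *
 *	set    = (ea >> 13) & (nsets - 1);
 *	dtlbmr = (ea & VPN_MASK) | 0x1;			// V (valid) bit
 *	pa     = (ea <= 0xbfffffff) ? ea : tophys(ea);	// 1:1 below KERNELBASE
 *	dtlbtr = (pa & PPN_MASK) | (DTLB_TR_MASK & ~PPN_MASK);	// CI|A|D|SRE|SWE
 */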
exit_with_no_dtranslation:
	/* EA out of memory or not in supervisor mode */
	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR4
	l.j	_dispatch_bus_fault

/* ---[ boot itlb miss handler ]----------------------------------------- */

boot_itlb_miss_handler:

/* mask for ITLB_MR register: - sets V (valid) bit,
 *                            - sets bits belonging to VPN (15-12)
 */
#define ITLB_MR_MASK	0xfffff001

/* mask for ITLB_TR register: - sets A (access) bit,
 *                            - sets SXE (superuser execute) bit
 *                            - sets bits belonging to VPN (15-12)
 */
#define ITLB_TR_MASK	0xfffff050

#define VPN_MASK	0xffffe000
#define PPN_MASK	0xffffe000
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	EXCEPTION_STORE_GPR5
	EXCEPTION_STORE_GPR6

	l.mfspr	r6,r0,SPR_ESR_BASE
	l.andi	r6,r6,SPR_SR_SM		// are we in kernel mode ?
	l.sfeqi	r6,0			// flag set if SM == 0, i.e. user mode
	l.bf	exit_with_no_itranslation
	 l.nop

	l.mfspr	r4,r0,SPR_EEAR_BASE	// get the offending EA
	l.srli	r3,r4,0xd		// r3 <- r4 / 8192 (sets are relative to page size (8Kb), NOT VPN size (4Kb))

	l.mfspr	r6, r0, SPR_IMMUCFGR
	l.andi	r6, r6, SPR_IMMUCFGR_NTS
	l.srli	r6, r6, SPR_IMMUCFGR_NTS_OFF
	l.ori	r5, r0, 0x1
	l.sll	r5, r5, r6		// r5 = number IMMU sets from IMMUCFGR
	l.addi	r6, r5, -1		// r6 = nsets mask
	l.and	r2, r3, r6		// r2 <- r3 % NSETS_MASK

	l.or	r6,r6,r4		// r6 <- r4
	l.ori	r6,r6,~(VPN_MASK)	// r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi	r5,hi(ITLB_MR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(ITLB_MR_MASK)	// r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
	l.and	r5,r5,r6		// r5 <- VPN :VPN .x001 - we have ITLBMR entry
	l.mtspr	r2,r5,SPR_ITLBMR_BASE(0) // set ITLBMR
	/*
	 * set up ITLB with no translation for EA <= 0x0fffffff
	 *
	 * we need this for head.S mapping (EA = PA). if we move all functions
	 * which run with mmu enabled into entry.S, we might be able to
	 * eliminate this.
	 */
	LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
	l.sfgeu	r6,r4			// flag if r6 >= r4 (if 0x0fffffff >= EA)
	l.bf	1f			// goto out
	 l.and	r3,r4,r4		// delay slot :: r3 <- r4 (if flag==1)

	tophys(r3,r4)			// r3 <- PA
1:
	l.ori	r3,r3,~(PPN_MASK)	// r3 <- PPN :PPN .xfff - clear up lo(r3) to 0x**** *fff
	l.movhi	r5,hi(ITLB_TR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(ITLB_TR_MASK)	// r5 <- ffff:1111.x050 - apply ITLB_TR_MASK
	l.and	r5,r5,r3		// r5 <- PPN :PPN .x050 - we have ITLBTR entry
	l.mtspr	r2,r5,SPR_ITLBTR_BASE(0) // set ITLBTR

	l.rfe				// SR <- ESR, PC <- EPC
exit_with_no_itranslation:
	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR4
	l.j	_dispatch_bus_fault
	 l.nop

/* ====================================================================== */

/*
 * Stuff below here shouldn't go into .head section... maybe this stuff
 * can be moved to entry.S ???
 */

/* ==============================================[ DTLB miss handler ]=== */

/*
 * Exception handlers are entered with MMU off so the following handler
 * needs to use physical addressing
 */

	.text
ENTRY(dtlb_miss_handler)
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	/*
	 * get EA of the miss
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE
	/*
	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
	 */
	GET_CURRENT_PGD(r3,r4)		// r3 is current_pgd, r4 is temp
	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
	l.slli	r4,r4,0x2		// to get address << 2
	l.add	r3,r4,r3		// r4 is pgd_index(daddr)
	/*
	 * if (pmd_none(*pmd))
	 *	goto pmd_none:
	 */
	tophys	(r4,r3)
	l.lwz	r3,0x0(r4)		// get *pmd value
	l.sfne	r3,r0
	l.bnf	d_pmd_none
	 l.addi	r3,r0,0xffffe000	// PAGE_MASK
	/*
	 * pte = *pte_offset(pmd, daddr);
	 */
	l.lwz	r4,0x0(r4)		// get **pmd value
	l.and	r4,r4,r3		// & PAGE_MASK
	l.srli	r2,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
	l.andi	r3,r2,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
	l.slli	r3,r3,0x2		// to get address << 2
	l.add	r3,r4,r3
	l.lwz	r3,0x0(r3)		// this is pte at last
	/*
	 * if (!pte_present(pte))
	 *	goto d_pte_not_present
	 */
	l.andi	r4,r3,0x1
	l.sfne	r4,r0			// is pte present
	l.bnf	d_pte_not_present
	 l.addi	r4,r0,0xffffe3fa	// PAGE_MASK | DTLB_UP_CONVERT_MASK
	/*
	 * fill DTLB TR register
	 */
	l.and	r4,r3,r4		// apply the mask
	// Determine number of DMMU sets
	l.mfspr	r2, r0, SPR_DMMUCFGR
	l.andi	r2, r2, SPR_DMMUCFGR_NTS
	l.srli	r2, r2, SPR_DMMUCFGR_NTS_OFF
	l.ori	r3, r0, 0x1
	l.sll	r3, r3, r2		// r3 = number DMMU sets DMMUCFGR
	l.addi	r2, r3, -1		// r2 = nsets mask
	l.mfspr	r3, r0, SPR_EEAR_BASE
	l.srli	r3, r3, 0xd		// >> PAGE_SHIFT
	l.and	r2, r3, r2		// calc offset: & (NUM_TLB_ENTRIES-1)

	l.mtspr	r2,r4,SPR_DTLBTR_BASE(0)
	/*
	 * fill DTLB MR register
	 */
	l.slli	r3, r3, 0xd		/* << PAGE_SHIFT => EA & PAGE_MASK */
	l.ori	r4,r3,0x1		// set hardware valid bit: DTLB_MR entry
	l.mtspr	r2,r4,SPR_DTLBMR_BASE(0)

	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	l.rfe
d_pmd_none:
d_pte_not_present:
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler)
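/*
 * Illustration only (not part of the build): the page-table walk performed
 * above, as C (two-level walk, 8 KiB pages: pgd index = ea >> 24,
 * pte index = (ea >> 13) & 0x7ff):
 *
 *	u32 pmd, pte;
 *	u32 *pgdp = tophys(current_pgd[coreid]) + (ea >> 24);	// pgd_index
 *	pmd = *pgdp;
 *	if (pmd == 0)
 *		goto page_fault;		// d_pmd_none
 *	pte = *(u32 *)((pmd & PAGE_MASK) + ((ea >> 13) & 0x7ff) * 4);
 *	if (!(pte & 0x1))			// _PAGE_PRESENT
 *		goto page_fault;		// d_pte_not_present
 *	set = (ea >> 13) & (dmmu_sets - 1);
 *	DTLBTR[set] = pte & (PAGE_MASK | DTLB_UP_CONVERT_MASK);
 *	DTLBMR[set] = (ea & PAGE_MASK) | 0x1;	// V bit
 */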
/* ==============================================[ ITLB miss handler ]=== */
ENTRY(itlb_miss_handler)
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	/*
	 * get EA of the miss
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE
	/*
	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
	 */
	GET_CURRENT_PGD(r3,r4)		// r3 is current_pgd, r4 is temp
	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
	l.slli	r4,r4,0x2		// to get address << 2
	l.add	r3,r4,r3		// r4 is pgd_index(daddr)
	/*
	 * if (pmd_none(*pmd))
	 *	goto pmd_none:
	 */
	tophys	(r4,r3)
	l.lwz	r3,0x0(r4)		// get *pmd value
	l.sfne	r3,r0
	l.bnf	i_pmd_none
	 l.addi	r3,r0,0xffffe000	// PAGE_MASK
	/*
	 * pte = *pte_offset(pmd, iaddr);
	 */
	l.lwz	r4,0x0(r4)		// get **pmd value
	l.and	r4,r4,r3		// & PAGE_MASK
	l.srli	r2,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
	l.andi	r3,r2,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
	l.slli	r3,r3,0x2		// to get address << 2
	l.add	r3,r4,r3
	l.lwz	r3,0x0(r3)		// this is pte at last
	/*
	 * if (!pte_present(pte))
	 *	goto i_pte_not_present
	 */
	l.andi	r4,r3,0x1
	l.sfne	r4,r0			// is pte present
	l.bnf	i_pte_not_present
	 l.addi	r4,r0,0xffffe03a	// PAGE_MASK | ITLB_UP_CONVERT_MASK
	/*
	 * fill ITLB TR register
	 */
	l.and	r4,r3,r4		// apply the mask
	l.andi	r3,r3,0x7c0		// _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE | _PAGE_URE | _PAGE_UWE
	l.sfeq	r3,r0
	l.bf	itlb_tr_fill //_workaround
	// Determine number of IMMU sets
	l.mfspr	r2, r0, SPR_IMMUCFGR
	l.andi	r2, r2, SPR_IMMUCFGR_NTS
	l.srli	r2, r2, SPR_IMMUCFGR_NTS_OFF
	l.ori	r3, r0, 0x1
	l.sll	r3, r3, r2		// r3 = number IMMU sets IMMUCFGR
	l.addi	r2, r3, -1		// r2 = nsets mask
	l.mfspr	r3, r0, SPR_EEAR_BASE
	l.srli	r3, r3, 0xd		// >> PAGE_SHIFT
	l.and	r2, r3, r2		// calc offset: & (NUM_TLB_ENTRIES-1)

	/*
	 * we should not just blindly set executable flags,
	 * but it does help with ping. the clean way would be to find out
	 * (and fix it) why stack doesn't have execution permissions
	 */

itlb_tr_fill_workaround:
	l.ori	r4,r4,0xc0		// | (SPR_ITLBTR_UXE | ITLBTR_SXE)
itlb_tr_fill:
	l.mtspr	r2,r4,SPR_ITLBTR_BASE(0)
	/*
	 * fill ITLB MR register
	 */
	l.slli	r3, r3, 0xd		/* << PAGE_SHIFT => EA & PAGE_MASK */
	l.ori	r4,r3,0x1		// set hardware valid bit: ITLB_MR entry
	l.mtspr	r2,r4,SPR_ITLBMR_BASE(0)

	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	l.rfe
i_pmd_none:
i_pte_not_present:
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_HANDLE(_itlb_miss_page_fault_handler)
/* ==============================================[ boot tlb handlers ]=== */

/* =================================================[ debugging aids ]=== */

	.align	64
_immu_trampoline:
	.space	64
_immu_trampoline_top:

#define TRAMP_SLOT_0		(0x0)
#define TRAMP_SLOT_1		(0x4)
#define TRAMP_SLOT_2		(0x8)
#define TRAMP_SLOT_3		(0xc)
#define TRAMP_SLOT_4		(0x10)
#define TRAMP_SLOT_5		(0x14)
#define TRAMP_FRAME_SIZE	(0x18)
ENTRY(_immu_trampoline_workaround)
	// r2 EEA
	// r6 is physical EEA
	tophys(r6,r2)

	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
	tophys	(r3,r5)			// r3 is trampoline (physical)

	LOAD_SYMBOL_2_GPR(r4,0x15000000)
	l.sw	TRAMP_SLOT_0(r3),r4
	l.sw	TRAMP_SLOT_1(r3),r4
	l.sw	TRAMP_SLOT_4(r3),r4
	l.sw	TRAMP_SLOT_5(r3),r4

	l.lwz	r4,0x0(r6)		// load op @ EEA + 0x0 (fc address)
	l.sw	TRAMP_SLOT_3(r3),r4	// store it to _immu_trampoline_data
	l.lwz	r4,-0x4(r6)		// load op @ EEA - 0x4 (f8 address)
	l.sw	TRAMP_SLOT_2(r3),r4	// store it to _immu_trampoline_data

	l.srli	r5,r4,26		// extract opcode (bits 31..26) to identify the jump
	l.sfeqi	r5,0x11			// l.jr
	l.sfeqi	r5,1			// l.jal
	l.sfeqi	r5,0x12			// l.jalr
	l.sfeqi	r5,3			// l.bnf
	l.sfeqi	r5,4			// l.bf

	l.j	99b			// should never happen
	 l.nop
	// r3 is trampoline address (physical)
	// r4 is instruction
	// r6 is physical(EEA)

	/* 19 20 aa aa	l.movhi r9,0xaaaa
	 * a9 29 bb bb	l.ori	r9,0xbbbb
	 *
	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
	 */

	l.addi	r6,r2,0x4		// this is 0xaaaabbbb

	// l.movhi r9,0xaaaa
	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
	l.srli	r5,r6,16
	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5

	// l.ori r9,0xbbbb
	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
	l.andi	r5,r6,0xffff
	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5
	/* fallthrough, need to set up new jump offset */

	l.slli	r6,r4,6			// original offset shifted left 6 - 2
//	l.srli	r6,r6,6			// original offset shifted right 2

	l.slli	r4,r2,4			// old jump position: EEA shifted left 4
//	l.srli	r4,r4,6			// old jump position: shifted right 2

	l.addi	r5,r3,0xc		// new jump position (physical)
	l.slli	r5,r5,4			// new jump position: shifted left 4

	// calculate new jump offset
	// new_off = old_off + (old_jump - new_jump)

	l.sub	r5,r4,r5		// old_jump - new_jump
	l.add	r5,r6,r5		// orig_off + (old_jump - new_jump)
	l.srli	r5,r5,6			// new offset shifted right 2

	// r5 is new jump offset
	// l.j has opcode 0x0...
	l.sw	TRAMP_SLOT_2(r3),r5	// write it back
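// Illustration only (not part of the build): the offset fix-up above in C.
// A 26-bit l.j/l.jal offset is counted in instruction words, so the <<6 / >>6
// pairs sign-extend it while the arithmetic is done in "<<2" byte units:
//
//	old_off  = sign_extend26(insn & 0x03ffffff) << 2;
//	new_off  = old_off + (old_jump_addr - new_jump_addr);
//	new_insn = (opcode << 26) | ((new_off >> 2) & 0x03ffffff);
//
// i.e. the relocated jump still lands on the original target even though
// the instruction now executes from the trampoline.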
/* ----------------------------- */

	/* 19 20 aa aa	l.movhi r9,0xaaaa
	 * a9 29 bb bb	l.ori	r9,0xbbbb
	 *
	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
	 */

	l.addi	r6,r2,0x4		// this is 0xaaaabbbb

	// l.movhi r9,0xaaaa
	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
	l.srli	r5,r6,16
	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5

	// l.ori r9,0xbbbb
	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
	l.andi	r5,r6,0xffff
	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5

	l.lhz	r5,(TRAMP_SLOT_2+0x0)(r3)	// load hi part of jump instruction
	l.andi	r5,r5,0x3ff			// clear out opcode part
	l.ori	r5,r5,0x4400			// opcode changed from l.jalr -> l.jr
	l.sh	(TRAMP_SLOT_2+0x0)(r3),r5	// write it back
/* ----------------------------- */

	l.slli	r6,r4,6			// original offset shifted left 6 - 2
//	l.srli	r6,r6,6			// original offset shifted right 2

	l.slli	r4,r2,4			// old jump position: EEA shifted left 4
//	l.srli	r4,r4,6			// old jump position: shifted right 2

	l.addi	r5,r3,0xc		// new jump position (physical)
	l.slli	r5,r5,4			// new jump position: shifted left 4

	// calculate new jump offset
	// new_off = old_off + (old_jump - new_jump)

	l.add	r6,r6,r4		// (orig_off + old_jump)
	l.sub	r6,r6,r5		// (orig_off + old_jump) - new_jump
	l.srli	r6,r6,6			// new offset shifted right 2

	// r6 is new jump offset
	l.lwz	r4,(TRAMP_SLOT_2+0x0)(r3)	// load jump instruction
	l.srli	r4,r4,16
	l.andi	r4,r4,0xfc00		// get opcode part
	l.slli	r4,r4,16
	l.or	r6,r4,r6		// l.b(n)f new offset
	l.sw	TRAMP_SLOT_2(r3),r6	// write it back
	/* we need to add l.j to EEA + 0x8 */
	tophys	(r4,r2)			// may not be needed (due to shifts down)
	l.addi	r4,r4,(0x8 - 0x8)	// jump target = r2 + 0x8 (compensate for 0x8)
					// jump position = r5 + 0x8 (0x8 compensated)
	l.sub	r4,r4,r5		// jump offset = target - new_position + 0x8

	l.slli	r4,r4,4			// the amount of info in immediate of jump
	l.srli	r4,r4,6			// jump instruction with offset
	l.sw	TRAMP_SLOT_4(r3),r4	// write it to 4th slot
	// set up new EPC to point to our trampoline code
	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
	l.mtspr	r0,r5,SPR_EPCR_BASE

	// immu_trampoline is (4x) CACHE_LINE aligned
	// and only 6 instructions long,
	// so we need to invalidate only 2 lines

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contains block size
	*/
	l.mfspr	r21,r0,SPR_ICCFGR
	l.andi	r21,r21,SPR_ICCFGR_CBS
	l.srli	r21,r21,7
	l.ori	r20,r0,16
	l.sll	r14,r20,r21

	l.mtspr	r0,r5,SPR_ICBIR
	l.add	r5,r5,r14
	l.mtspr	r0,r5,SPR_ICBIR

	l.jr	r9
	 l.nop
/*
 * DSCR: prints a string referenced by r3.
 *
 * PRMS: r3	- address of the first character of null
 *		  terminated string to be printed
 *
 * PREQ: UART at UART_BASE_ADD has to be initialized
 *
 * POST: caller should be aware that r3, r9 are changed
 */
ENTRY(_emergency_print)
	EMERGENCY_PRINT_STORE_GPR4
	EMERGENCY_PRINT_STORE_GPR5
	EMERGENCY_PRINT_STORE_GPR6
	EMERGENCY_PRINT_STORE_GPR7
	l.movhi	r4,hi(UART_BASE_ADD)

	/* next character */

	EMERGENCY_PRINT_LOAD_GPR7
	EMERGENCY_PRINT_LOAD_GPR6
	EMERGENCY_PRINT_LOAD_GPR5
	EMERGENCY_PRINT_LOAD_GPR4
	l.jr	r9
	 l.nop
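/*
 * Illustration only (not part of the build): the elided output loop polls
 * a 16550-style UART the way early consoles usually do; in C terms
 * (register offsets from <linux/serial_reg.h>):
 *
 *	while ((c = *s++) != '\0') {
 *		while (!(readb(UART_BASE_ADD + UART_LSR) & UART_LSR_THRE))
 *			;				// wait for TX holding empty
 *		writeb(c, UART_BASE_ADD + UART_TX);
 *	}
 */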
ENTRY(_emergency_print_nr)
	EMERGENCY_PRINT_STORE_GPR4
	EMERGENCY_PRINT_STORE_GPR5
	EMERGENCY_PRINT_STORE_GPR6
	EMERGENCY_PRINT_STORE_GPR7
	EMERGENCY_PRINT_STORE_GPR8

	l.addi	r8,r0,32		// shift register

1:	/* remove leading zeros */

	/* don't skip the last zero if number == 0x0 */

	l.movhi	r4,hi(UART_BASE_ADD)

	/* next character */

	EMERGENCY_PRINT_LOAD_GPR8
	EMERGENCY_PRINT_LOAD_GPR7
	EMERGENCY_PRINT_LOAD_GPR6
	EMERGENCY_PRINT_LOAD_GPR5
	EMERGENCY_PRINT_LOAD_GPR4
	l.jr	r9
	 l.nop
/*
 * This should be used for debugging only.
 * It messes up the Linux early serial output
 * somehow, so use it sparingly and essentially
 * only if you need to debug something that goes wrong
 * before Linux gets the early serial going.
 *
 * Furthermore, you'll have to make sure you set the
 * UART_DIVISOR correctly according to the system
 * clock rate.
 */

#define SYS_CLK		20000000
//#define SYS_CLK	1843200

#define OR32_CONSOLE_BAUD	115200

#define UART_DIVISOR	SYS_CLK/(16*OR32_CONSOLE_BAUD)
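/*
 * Worked example: with SYS_CLK = 20000000 and OR32_CONSOLE_BAUD = 115200,
 * UART_DIVISOR = 20000000 / (16 * 115200) = 20000000 / 1843200 = 10
 * (integer division), which _early_uart_init below splits into DLM:DLL.
 */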
ENTRY(_early_uart_init)
	l.movhi	r3,hi(UART_BASE_ADD)

	l.addi	r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
	l.sb	UART_DLM(r3),r4
	l.addi	r4,r0,((UART_DIVISOR) & 0x000000ff)
	l.sb	UART_DLL(r3),r4
	.global	_secondary_evbar
_secondary_evbar:

	/* Just disable interrupts and return */
	l.ori	r3,r0,SPR_SR_SM
	l.mtspr	r0,r3,SPR_ESR_BASE
	l.rfe
_string_unhandled_exception:
	.string "\n\rRunarunaround: Unhandled exception 0x\0"

_string_epc_prefix:
	.string ": EPC=0x\0"

_string_nl:
	.string "\n\r\0"
/* ========================================[ page aligned structures ]=== */

/*
 * .data section should be page aligned
 *	(look into arch/openrisc/kernel/vmlinux.lds.S)
 */
	.section .data,"aw"
	.align	8192
	.global	empty_zero_page
empty_zero_page:
	.space	8192

	.global	swapper_pg_dir
swapper_pg_dir:
	.space	8192

	.global	_unhandled_stack
_unhandled_stack:
	.space	8192
_unhandled_stack_top:
/* ============================================================[ EOF ]=== */