2 * Copyright (C) 2004-2006 Atmel Corporation
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
10 * This file contains the low-level entry-points into the kernel, that is,
11 * exception handlers, debug trap handlers, interrupt handlers and the
12 * system call handler.
14 #include <linux/errno.h>
17 #include <asm/hardirq.h>
21 #include <asm/pgtable.h>
22 #include <asm/ptrace.h>
23 #include <asm/sysreg.h>
24 #include <asm/thread_info.h>
25 #include <asm/unistd.h>
/*
 * preempt_stop masks interrupts before the need_resched check so a
 * reschedule cannot sneak in between sampling and return.
 * fault_resume_kernel degenerates to a plain full register restore.
 * NOTE(review): the surrounding #ifdef CONFIG_PREEMPT/#else lines are
 * not visible in this chunk -- confirm which branch these belong to.
 */
28 # define preempt_stop mask_interrupts
31 # define fault_resume_kernel fault_restore_all
/* IRQ_MASK covers both the softirq and hardirq count fields of
 * preempt_count, built from their per-field bit widths. */
34 #define __MASK(x) ((1 << (x)) - 1)
35 #define IRQ_MASK ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
36 (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
/*
 * Exception vector stubs: each slot branches to the low-level handler
 * for its exception cause.  (Intermediate lines of the original table,
 * including the per-vector alignment/labels, are not visible here --
 * do not reorder these entries; their offsets are ABI with the CPU.)
 */
38 .section .ex.text,"ax",@progbits
/* Bus errors on data write / data read go to separate C handlers. */
45 bral do_bus_error_write
47 bral do_bus_error_read
/* Address and protection faults share the generic fault handlers. */
51 bral handle_address_fault
53 bral handle_protection_fault
/* Illegal opcode and related traps all funnel into one handler. */
57 bral do_illegal_opcode_ll
59 bral do_illegal_opcode_ll
61 bral do_illegal_opcode_ll
65 bral do_illegal_opcode_ll
67 bral handle_address_fault
69 bral handle_address_fault
71 bral handle_protection_fault
73 bral handle_protection_fault
79 * r1 : Offending address
80 * r2 : Scratch register
81 * r3 : Cause (5, 12 or 13)
/* Save/restore the four scratch registers the TLB-miss fast path is
 * allowed to clobber (register roles documented just above). */
83 #define tlbmiss_save pushm r0-r3
84 #define tlbmiss_restore popm r0-r3
/*
 * Hardware TLB miss entry sections (ITLB exec, DTLB read, DTLB write);
 * the per-section stubs are not visible here but all paths share
 * tlb_miss_common below.
 */
86 .section .tlbx.ex.text,"ax",@progbits
92 .section .tlbr.ex.text,"ax",@progbits
97 .section .tlbw.ex.text,"ax",@progbits
/*
 * tlb_miss_common: walk the two-level page table by hand and load the
 * resulting PTE into the TLB.  Runs with only r0-r3 available (see
 * tlbmiss_save).  Falls through to the C slow path on any miss.
 */
101 .global tlb_miss_common
/* r0 = faulting virtual address, read from the TLBEAR system reg. */
103 mfsr r0, SYSREG_TLBEAR
106 /* Is it the vmalloc space? */
108 brcs handle_vmalloc_miss
110 /* First level lookup */
/* r2 = PGD index of the faulting address. */
112 lsr r2, r0, PGDIR_SHIFT
/* r1 = PTE index: the address bits between PAGE_SHIFT and PGDIR_SHIFT. */
114 bfextu r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
/* PGD entry not present -> take the C slow path. */
115 bld r3, _PAGE_BIT_PRESENT
116 brcc page_table_not_present
118 /* Translate to virtual address in P1. */
122 /* Second level lookup */
124 mfsr r0, SYSREG_TLBARLO
125 bld r2, _PAGE_BIT_PRESENT
126 brcc page_not_present
128 /* Mark the page as accessed */
129 sbr r2, _PAGE_BIT_ACCESSED
132 /* Drop software flags */
/* Only the hardware-interpreted PTE bits may reach TLBELO. */
133 andl r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
134 mtsr SYSREG_TLBELO, r2
136 /* Figure out which entry we want to replace */
137 mfsr r1, SYSREG_MMUCR
/* All TLBARLO bits set means every entry was accessed since the last
 * scan: restart the replacement pointer at entry 0 and clear TLBAR. */
140 mov r3, -1 /* All entries have been accessed, */
141 mov r2, 0 /* so start at 0 */
142 mtsr SYSREG_TLBARLO, r3 /* and reset TLBAR */
/* Write the chosen victim entry index into MMUCR.DRP. */
144 1: bfins r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
145 mtsr SYSREG_MMUCR, r1
/* vmalloc addresses are resolved through init's page table. */
152 /* Simply do the lookup in init's page table */
153 mov r1, lo(swapper_pg_dir)
154 orh r1, hi(swapper_pg_dir)
158 /* --- System Call --- */
160 .section .scall.text,"ax",@progbits
162 #ifdef CONFIG_PREEMPT
/* Keep the original r12 so a restarted syscall can recover it. */
165 pushm r12 /* r12_orig */
/* RAR/RSR_SUP hold the userspace return PC and status register. */
168 mfsr r0, SYSREG_RAR_SUP
169 mfsr r1, SYSREG_RSR_SUP
170 #ifdef CONFIG_PREEMPT
176 /* check for syscall tracing */
178 ld.w r1, r0[TI_flags]
179 bld r1, TIF_SYSCALL_TRACE
180 brcs syscall_trace_enter
/* Dispatch through the syscall table (address loaded PC-relative). */
186 lddpc lr, syscall_table_addr
188 mov r8, r5 /* 5th argument (6th is pushed by stub) */
/*
 * syscall_return: common exit path.  Interrupts must stay masked from
 * the TI_flags sample until the final rets, or pending work could be
 * missed.
 */
191 .global syscall_return
194 mask_interrupts /* make sure we don't miss an interrupt
195 setting need_resched or sigpending
196 between sampling and the rets */
198 /* Store the return value so that the correct value is loaded below */
199 stdsp sp[REG_R12], r12
/* Any pending work flag (signals, resched, tracing) => slow path. */
201 ld.w r1, r0[TI_flags]
202 andl r1, _TIF_ALLWORK_MASK, COH
203 brne syscall_exit_work
/* Restore the return PC/SR, then drop the r12_orig slot. */
207 mtsr SYSREG_RAR_SUP, r8
208 mtsr SYSREG_RSR_SUP, r9
210 sub sp, -4 /* r12_orig */
/*
 * ret_from_fork: first return to userspace of a new task; re-checks
 * work flags before joining the normal syscall exit path.
 */
221 .global ret_from_fork
225 /* check for syscall tracing */
227 ld.w r1, r0[TI_flags]
228 andl r1, _TIF_ALLWORK_MASK, COH
229 brne syscall_exit_work
230 rjmp syscall_exit_cont
236 rjmp syscall_trace_cont
239 bld r1, TIF_SYSCALL_TRACE
/* syscall_exit_work loop: handle resched, then pending signals,
 * re-sampling TI_flags after every action. */
244 ld.w r1, r0[TI_flags]
246 1: bld r1, TIF_NEED_RESCHED
251 ld.w r1, r0[TI_flags]
254 2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
260 rcall do_notify_resume
262 ld.w r1, r0[TI_flags]
/* TIF_BREAKPOINT: re-arm the hardware breakpoint/watchpoint debug
 * registers before resuming userspace. */
265 3: bld r1, TIF_BREAKPOINT
266 brcc syscall_exit_cont
267 mfsr r3, SYSREG_TLBEHI
273 mtdr DBGREG_BWA2A, r2
274 mtdr DBGREG_BWC2A, r3
275 rjmp syscall_exit_cont
278 /* The slow path of the TLB miss handler */
279 page_table_not_present:
284 rcall save_full_context_ex
288 rjmp ret_from_exception
290 /* This function expects to find offending PC in SYSREG_RAR_EX */
291 save_full_context_ex:
/* Read the saved status; the mode bits distinguish a fault taken
 * from user mode (zero) from one taken in kernel mode. */
292 mfsr r8, SYSREG_RSR_EX
294 andh r8, (MODE_MASK >> 16), COH
295 mfsr r11, SYSREG_RAR_EX
298 1: pushm r11, r12 /* PC and SR */
/* Kernel-mode fault: the saved-SP slot must point above this frame,
 * not at the frame itself. */
302 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
303 stdsp sp[4], r10 /* replace saved SP */
306 /* Low-level exception handlers */
310 rcall save_full_context_ex
313 rcall do_critical_exception
315 /* We should never get here... */
/* Compute the PC-relative address of the panic string below. */
317 sub r12, pc, (. - 1f)
320 1: .asciz "Return from critical exception!"
326 rcall save_full_context_ex
333 rcall save_full_context_ex
/* BEAR holds the offending bus address; pass it to the C handler. */
335 1: mfsr r12, SYSREG_BEAR
338 rjmp ret_from_exception
/*
 * NMI entry: build a frame from RAR/RSR_NMI.  NOTE(review): the
 * surrounding label and branch lines are not visible in this chunk.
 */
344 mfsr r9, SYSREG_RSR_NMI
345 mfsr r8, SYSREG_RAR_NMI
346 bfextu r0, r9, MODE_SHIFT, 3
349 1: pushm r8, r9 /* PC and SR */
/* Restore NMI return PC/SR, then unwind the skipped frame slots. */
354 mtsr SYSREG_RAR_NMI, r8
356 mtsr SYSREG_RSR_NMI, r9
360 sub sp, -4 /* skip r12_orig */
363 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
364 stdsp sp[4], r10 /* replace saved SP */
368 sub sp, -4 /* skip sp */
370 sub sp, -4 /* skip r12_orig */
/*
 * Generic fault handlers: save the full context, call the matching C
 * handler, then leave through the common ret_from_exception path.
 */
373 handle_address_fault:
376 rcall save_full_context_ex
379 rcall do_address_exception
380 rjmp ret_from_exception
382 handle_protection_fault:
385 rcall save_full_context_ex
389 rjmp ret_from_exception
392 do_illegal_opcode_ll:
395 rcall save_full_context_ex
398 rcall do_illegal_opcode
399 rjmp ret_from_exception
/*
 * DTLB-modified slow path: redo the page-table walk by hand
 * (TLBEAR = faulting address), set the dirty bit, and refresh the
 * TLB entry in place.
 */
403 mfsr r1, SYSREG_TLBEAR
405 lsr r2, r1, PGDIR_SHIFT
/* Isolate the PTE-index bits of the faulting address. */
407 lsl r1, (32 - PGDIR_SHIFT)
408 lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
410 /* Translate to virtual address in P1 */
415 sbr r3, _PAGE_BIT_DIRTY
419 /* The page table is up-to-date. Update the TLB entry as well */
420 andl r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
421 mtsr SYSREG_TLBELO, r0
423 /* MMUCR[DRP] is updated automatically, so let's go... */
/* page_not_present slow path: full context save, then C handler. */
432 rcall save_full_context_ex
437 rjmp ret_from_exception
/*
 * ret_from_exception: r4 holds the saved status register; non-zero
 * mode bits mean we faulted in kernel context.
 */
442 andh r4, (MODE_MASK >> 16), COH
443 brne fault_resume_kernel
/* Returning to user: check for pending work first. */
446 ld.w r1, r0[TI_flags]
447 andl r1, _TIF_WORK_MASK, COH
/* Restore the exception return PC/SR. */
453 mtsr SYSREG_RAR_EX, r8
454 mtsr SYSREG_RSR_EX, r9
460 #ifdef CONFIG_PREEMPT
/* Only preempt when preempt_count is zero and NEED_RESCHED is set. */
462 ld.w r2, r0[TI_preempt_count]
465 ld.w r1, r0[TI_flags]
466 bld r1, TIF_NEED_RESCHED
/* Don't preempt if interrupts were globally masked at fault time. */
469 bld r4, SYSREG_GM_OFFSET
471 rcall preempt_schedule_irq
478 mtsr SYSREG_RAR_EX, r8
479 mtsr SYSREG_RSR_EX, r9
481 sub sp, -4 /* ignore SP */
483 sub sp, -4 /* ignore r12_orig */
487 /* Switch to exception mode so that we can share the same code. */
489 cbr r8, SYSREG_M0_OFFSET
490 orh r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
/* User work loop: reschedule, deliver signals, re-arm breakpoints,
 * re-sampling TI_flags after each step. */
494 ld.w r1, r0[TI_flags]
497 bld r1, TIF_NEED_RESCHED
502 ld.w r1, r0[TI_flags]
505 1: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
511 rcall do_notify_resume
513 ld.w r1, r0[TI_flags]
516 2: bld r1, TIF_BREAKPOINT
517 brcc fault_resume_user
518 mfsr r3, SYSREG_TLBEHI
524 mtdr DBGREG_BWA2A, r2
525 mtdr DBGREG_BWC2A, r3
526 rjmp fault_resume_user
528 /* If we get a debug trap from privileged context we end up here */
530 /* Fix up LR and SP in regs. r11 contains the mode we came from */
533 andh r8, hi(~MODE_MASK)
540 sub r10, sp, -FRAME_SIZE_FULL
541 stdsp sp[REG_SP], r10
545 /* Now, put everything back */
546 mtsr SYSREG_RAR_DBG, r10
632 .set rsr_int0, SYSREG_RSR_INT0
633 .set rsr_int1, SYSREG_RSR_INT1
634 .set rsr_int2, SYSREG_RSR_INT2
635 .set rsr_int3, SYSREG_RSR_INT3
636 .set rar_int0, SYSREG_RAR_INT0
637 .set rar_int1, SYSREG_RAR_INT1
638 .set rar_int2, SYSREG_RAR_INT2
639 .set rar_int3, SYSREG_RAR_INT3
641 .macro IRQ_LEVEL level
642 .type irq_level\level, @function
644 sub sp, 4 /* r12_orig */
646 mfsr r8, rar_int\level
647 mfsr r9, rsr_int\level
649 #ifdef CONFIG_PREEMPT
650 sub r11, pc, (. - system_call)
663 bfextu r4, r4, SYSREG_M0_OFFSET, 3
664 cp.w r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
666 cp.w r4, MODE_USER >> SYSREG_M0_OFFSET
667 #ifdef CONFIG_PREEMPT
674 ld.w r1, r0[TI_flags]
675 andl r1, _TIF_WORK_MASK, COH
679 mtsr rar_int\level, r8
680 mtsr rsr_int\level, r9
682 sub sp, -4 /* ignore r12_orig */
685 #ifdef CONFIG_PREEMPT
687 mfsr r8, rsr_int\level
689 mtsr rsr_int\level, r8
691 sub sp, -4 /* ignore r12_orig */
695 2: get_thread_info r0
696 ld.w r1, r0[TI_flags]
697 bld r1, TIF_CPU_GOING_TO_SLEEP
698 #ifdef CONFIG_PREEMPT
703 sub r1, pc, . - cpu_idle_skip_sleep
705 #ifdef CONFIG_PREEMPT
706 3: get_thread_info r0
707 ld.w r2, r0[TI_preempt_count]
710 ld.w r1, r0[TI_flags]
711 bld r1, TIF_NEED_RESCHED
714 bld r4, SYSREG_GM_OFFSET
716 rcall preempt_schedule_irq
721 .section .irq.text,"ax",@progbits
723 .global cpu_idle_sleep
727 ld.w r9, r8[TI_flags]
728 bld r9, TIF_NEED_RESCHED
729 brcs cpu_idle_enable_int_and_exit
730 sbr r9, TIF_CPU_GOING_TO_SLEEP
731 st.w r8[TI_flags], r9
736 ld.w r9, r8[TI_flags]
737 cbr r9, TIF_CPU_GOING_TO_SLEEP
738 st.w r8[TI_flags], r9
739 cpu_idle_enable_int_and_exit: