/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2008 Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_counter.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        int ret = 0;

#ifdef CONFIG_KPROBES
        if (!user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, trap))
                        ret = 1;
                preempt_enable();
        }
#endif

        return ret;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                                        unsigned long writeaccess,
                                        unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        int si_code;
        int fault;
        siginfo_t info;

        /*
         * We don't bother with any notifier callbacks here, as they are
         * all handled through the __do_page_fault() fast-path.
         */

        tsk = current;
        si_code = SEGV_MAPERR;

        if (unlikely(address >= TASK_SIZE)) {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;

                pgd = get_TTB() + offset;
                pgd_k = swapper_pg_dir + offset;

                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        set_pgd(pgd, *pgd_k);
                        return;
                }

                pud = pud_offset(pgd, address);
                pud_k = pud_offset(pgd_k, address);

                if (!pud_present(*pud)) {
                        if (!pud_present(*pud_k))
                                goto bad_area_nosemaphore;
                        set_pud(pud, *pud_k);
                        return;
                }

                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;
                set_pmd(pmd, *pmd_k);

                return;
        }

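        /*
         * Note that only the top-level entries are copied from
         * swapper_pg_dir here: kernel mappings created after this
         * task's page table was set up become visible lazily, on first
         * touch, rather than by updating every task's pgd up front.
         */
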
        mm = tsk->mm;

        if (unlikely(notify_page_fault(regs, lookup_exception_vector())))
                return;

        /* Only enable interrupts if they were on before the fault */
        if ((regs->sr & SR_IMASK) != SR_IMASK)
                local_irq_enable();

        perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
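        /*
         * Reaching this point means the faulting address sat below the
         * nearest VMA, but that VMA is a grows-down stack which
         * expand_stack() has just extended to cover the address, so we
         * can fall through to good_area.
         */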
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        si_code = SEGV_ACCERR;
        if (writeaccess) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }

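        /*
         * Past this point the access is known to be permitted by the
         * VMA's protection bits: writes require VM_WRITE, while any
         * other access merely needs the mapping to be readable,
         * executable, or writable at all.
         */
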
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
survive:
        fault = handle_mm_fault(mm, vma, address,
                                writeaccess ? FAULT_FLAG_WRITE : 0);
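        /*
         * handle_mm_fault() returns a VM_FAULT_* bitmask: fatal
         * conditions are reported via VM_FAULT_ERROR, while
         * VM_FAULT_MAJOR indicates that I/O was needed to bring the
         * page in.
         */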
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR) {
                tsk->maj_flt++;
                perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
                                     regs, address);
        } else {
                tsk->min_flt++;
                perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
                                     regs, address);
        }
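        /*
         * The maj_flt/min_flt counters feed getrusage() and /proc
         * statistics; the perf software counters mirror the same
         * major/minor distinction.
         */
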
        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        if (user_mode(regs)) {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = si_code;
                info.si_addr = (void *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        if (handle_trapped_io(regs, address))
                return;

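        /*
         * handle_trapped_io() covers SH's trapped I/O mechanism (see
         * asm/io_trapped.h), which emulates accesses to registered
         * I/O windows rather than treating them as fatal faults.
         */
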
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
        bust_spinlocks(1);

        if (oops_may_print()) {
                unsigned long page;

                if (address < PAGE_SIZE)
                        printk(KERN_ALERT "Unable to handle kernel NULL "
                                          "pointer dereference");
                else
                        printk(KERN_ALERT "Unable to handle kernel paging "
                                          "request");
                printk(" at virtual address %08lx\n", address);
                printk(KERN_ALERT "pc = %08lx\n", regs->pc);
                page = (unsigned long)get_TTB();
                if (page) {
                        page = ((__typeof__(page) *)page)[address >>
                                                          PGDIR_SHIFT];
                        printk(KERN_ALERT "*pde = %08lx\n", page);
                        if (page & _PAGE_PRESENT) {
                                page &= PAGE_MASK;
                                address &= 0x003ff000;
                                page = ((__typeof__(page) *)
                                                __va(page))[address >>
                                                            PAGE_SHIFT];
                                printk(KERN_ALERT "*pte = %08lx\n", page);
                        }
                }
        }

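        /*
         * The dump above walks the page table by hand: the pde is
         * indexed by the top address bits (PGDIR_SHIFT), while the
         * 0x003ff000 mask extracts the pte index within the
         * page-table page, assuming 4K pages.
         */
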
        die("Oops", regs, writeaccess);
        bust_spinlocks(0);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_global_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", tsk->comm);
        if (user_mode(regs))
                do_group_exit(SIGKILL);
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *)address;
        force_sig_info(SIGBUS, &info, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
}

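/*
 * __do_page_fault() below is the fast path, entered on a TLB miss with
 * interrupts still disabled: it only reloads the TLB from a pte that is
 * already set up. Anything it cannot satisfy is reported through a
 * nonzero return, and the caller falls back to do_page_fault() above.
 */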
/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
                                         unsigned long writeaccess,
                                         unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
        int ret = 0;

        /*
         * We don't take page faults for P1, P2, and parts of P4, these
         * are always mapped, whether it be due to legacy behaviour in
         * 29-bit mode, or due to PMB configuration in 32-bit mode.
         */
        if (address >= P3SEG && address < P3_ADDR_MAX) {
                pgd = pgd_offset_k(address);
        } else {
                if (unlikely(address >= TASK_SIZE || !current->mm))
                        return 1;

                pgd = pgd_offset(current->mm, address);
        }

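        /*
         * P3 (kernel-mapped) addresses are resolved through the kernel
         * page table via pgd_offset_k(); anything else must be a valid
         * user address backed by a live mm, and is looked up in the
         * current task's page table instead.
         */
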
        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
                return 1;
        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
                return 1;
        pte = pte_offset_kernel(pmd, address);
        entry = *pte;
        if (unlikely(pte_none(entry) || pte_not_present(entry)))
                return 1;
        if (unlikely(writeaccess && !pte_write(entry)))
                return 1;
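        /*
         * Each early "return 1" above means the fault cannot be
         * satisfied from the existing pte; the caller then takes the
         * full do_page_fault() slow path.
         */
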
        if (writeaccess)
                entry = pte_mkdirty(entry);
        entry = pte_mkyoung(entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
        /*
         * ITLB is not affected by "ldtlb" instruction.
         * So, we need to flush the entry by ourselves.
         */
        local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

        set_pte(pte, entry);
        update_mmu_cache(NULL, address, entry);

        return ret;
}