/* arch/sh/mm/fault.c (from linux/fpc-iii.git, Linux 4.8-rc8) */
/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
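
/*
 * Give any registered kprobe fault handler first crack at kernel-mode
 * faults, so a fault taken on a probed instruction is resolved by the
 * kprobes machinery instead of the normal fault path.
 */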
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        int ret = 0;

        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, trap))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
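
/*
 * Deliver the given signal (SIGSEGV or SIGBUS here) to @tsk, with the
 * faulting address recorded in the siginfo.
 */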
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
                     struct task_struct *tsk)
{
        siginfo_t info;

        info.si_signo   = si_signo;
        info.si_errno   = 0;
        info.si_code    = si_code;
        info.si_addr    = (void __user *)address;

        force_sig_info(si_signo, &info, tsk);
}
/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;

        if (mm) {
                pgd = mm->pgd;
        } else {
                pgd = get_TTB();

                if (unlikely(!pgd))
                        pgd = swapper_pg_dir;
        }

        printk(KERN_ALERT "pgd = %p\n", pgd);
        pgd += pgd_index(addr);
        printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
               (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));

        do {
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                if (pgd_none(*pgd))
                        break;

                if (pgd_bad(*pgd)) {
                        printk("(bad)");
                        break;
                }

                pud = pud_offset(pgd, addr);
                if (PTRS_PER_PUD != 1)
                        printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
                               (u64)pud_val(*pud));

                if (pud_none(*pud))
                        break;

                if (pud_bad(*pud)) {
                        printk("(bad)");
                        break;
                }

                pmd = pmd_offset(pud, addr);
                if (PTRS_PER_PMD != 1)
                        printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
                               (u64)pmd_val(*pmd));

                if (pmd_none(*pmd))
                        break;

                if (pmd_bad(*pmd)) {
                        printk("(bad)");
                        break;
                }

                /* We must not map this if we have highmem enabled */
                if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
                        break;

                pte = pte_offset_kernel(pmd, addr);
                printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
                       (u64)pte_val(*pte));
        } while (0);

        printk("\n");
}
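
/*
 * Copy the init_mm (reference) page table entries covering @address
 * into the page table rooted at @pgd. Returns the synchronized pmd on
 * success, or NULL when the entries are absent from the reference
 * table or already in sync, i.e. the fault was not taken care of here.
 */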
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
        unsigned index = pgd_index(address);
        pgd_t *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd += index;
        pgd_k = init_mm.pgd + index;

        if (!pgd_present(*pgd_k))
                return NULL;

        pud = pud_offset(pgd, address);
        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        if (!pud_present(*pud))
                set_pud(pud, *pud_k);

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;

        if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);
        else {
                /*
                 * The page tables are fully synchronised so there must
                 * be another reason for the fault. Return NULL here to
                 * signal that we have not taken care of the fault.
                 */
                BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
                return NULL;
        }

        return pmd_k;
}
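
/*
 * With the SH-4 store queue API enabled, demand-faultable kernel
 * mappings extend beyond VMALLOC_END, so the window checked by
 * vmalloc_fault() widens to the top of the P3 area.
 */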
#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT      P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT      VMALLOC_END
#endif
/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
        pgd_t *pgd_k;
        pmd_t *pmd_k;
        pte_t *pte_k;

        /* Make sure we are in vmalloc/module/P3 area: */
        if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
                return -1;

        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         *
         * Do _not_ use "current" here. We might be inside
         * an interrupt in the middle of a task switch..
         */
        pgd_k = get_TTB();
        pmd_k = vmalloc_sync_one(pgd_k, address);
        if (!pmd_k)
                return -1;

        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
                return -1;

        return 0;
}
static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
        if (!oops_may_print())
                return;

        printk(KERN_ALERT "BUG: unable to handle kernel ");
        if (address < PAGE_SIZE)
                printk(KERN_CONT "NULL pointer dereference");
        else
                printk(KERN_CONT "paging request");

        printk(KERN_CONT " at %08lx\n", address);
        printk(KERN_ALERT "PC:");
        printk_address(regs->pc, 1);

        show_pte(NULL, address);
}
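
/*
 * Kernel-mode faults with no user recovery: first consult the
 * exception tables (e.g. a faulting get_user() resumes at its fixup),
 * then try trapped I/O emulation, and only then oops.
 */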
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
           unsigned long address)
{
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        if (handle_trapped_io(regs, address))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);

        show_fault_oops(regs, address);

        die("Oops", regs, error_code);
        bust_spinlocks(0);
        do_exit(SIGKILL);
}
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                       unsigned long address, int si_code)
{
        struct task_struct *tsk = current;

        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                /*
                 * It's possible to have interrupts off here:
                 */
                local_irq_enable();

                force_sig_info_fault(SIGSEGV, si_code, address, tsk);

                return;
        }

        no_context(regs, error_code, address);
}
static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                     unsigned long address)
{
        __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}
static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
           unsigned long address, int si_code)
{
        struct mm_struct *mm = current->mm;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
        up_read(&mm->mmap_sem);

        __bad_area_nosemaphore(regs, error_code, address, si_code);
}
static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
        __bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
                      unsigned long address)
{
        __bad_area(regs, error_code, address, SEGV_ACCERR);
}
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;

        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exceptions or die: */
        if (!user_mode(regs))
                no_context(regs, error_code, address);

        force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}
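
/*
 * Handle the error cases from handle_mm_fault(). Returns 1 when the
 * fault has been dealt with terminally (the caller must just return;
 * mmap_sem has been dropped on all of these paths, either here or by
 * handle_mm_fault() itself on VM_FAULT_RETRY), or 0 when normal fault
 * accounting should continue.
 */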
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
               unsigned long address, unsigned int fault)
{
        /*
         * Pagefault was interrupted by SIGKILL. We have no reason to
         * continue pagefault.
         */
        if (fatal_signal_pending(current)) {
                if (!(fault & VM_FAULT_RETRY))
                        up_read(&current->mm->mmap_sem);
                if (!user_mode(regs))
                        no_context(regs, error_code, address);
                return 1;
        }

        if (!(fault & VM_FAULT_ERROR))
                return 0;

        if (fault & VM_FAULT_OOM) {
                /* Kernel mode? Handle exceptions or die: */
                if (!user_mode(regs)) {
                        up_read(&current->mm->mmap_sem);
                        no_context(regs, error_code, address);
                        return 1;
                }
                up_read(&current->mm->mmap_sem);

                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we
                 * got oom-killed):
                 */
                pagefault_out_of_memory();
        } else {
                if (fault & VM_FAULT_SIGBUS)
                        do_sigbus(regs, error_code, address);
                else if (fault & VM_FAULT_SIGSEGV)
                        bad_area(regs, error_code, address);
                else
                        BUG();
        }

        return 1;
}
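
/*
 * Check the attempted access against the vma's permissions: writes
 * need VM_WRITE, instruction fetches (an ITLB miss) need VM_EXEC, and
 * reads fail only on a vma that grants no access rights at all.
 */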
static inline int access_error(int error_code, struct vm_area_struct *vma)
{
        if (error_code & FAULT_CODE_WRITE) {
                /* write, present and write, not present: */
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
                        return 1;
                return 0;
        }

        /* ITLB miss on NX page */
        if (unlikely((error_code & FAULT_CODE_ITLB) &&
                     !(vma->vm_flags & VM_EXEC)))
                return 1;

        /* read, not present: */
        if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
                return 1;

        return 0;
}
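
/* Any address at or above TASK_SIZE is treated as kernel space. */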
static int fault_in_kernel_space(unsigned long address)
{
        return address >= TASK_SIZE;
}
/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                                        unsigned long error_code,
                                        unsigned long address)
{
        unsigned long vec;
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        tsk = current;
        mm = tsk->mm;
        vec = lookup_exception_vector();

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (unlikely(fault_in_kernel_space(address))) {
                if (vmalloc_fault(address) >= 0)
                        return;
                if (notify_page_fault(regs, vec))
                        return;

                bad_area_nosemaphore(regs, error_code, address);
                return;
        }

        if (unlikely(notify_page_fault(regs, vec)))
                return;

        /* Only enable interrupts if they were on before the fault */
        if ((regs->sr & SR_IMASK) != SR_IMASK)
                local_irq_enable();

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        /*
         * If we're in an interrupt, have no user context or are running
         * with pagefaults disabled then we must not take the fault:
         */
        if (unlikely(faulthandler_disabled() || !mm)) {
                bad_area_nosemaphore(regs, error_code, address);
                return;
        }
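
        /*
         * The first attempt runs with FAULT_FLAG_ALLOW_RETRY set:
         * handle_mm_fault() may drop mmap_sem while waiting on page I/O
         * and report VM_FAULT_RETRY, in which case we come back here
         * and retry exactly once more with FAULT_FLAG_TRIED.
         */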
retry:
        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (unlikely(!vma)) {
                bad_area(regs, error_code, address);
                return;
        }
        if (likely(vma->vm_start <= address))
                goto good_area;
        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
                bad_area(regs, error_code, address);
                return;
        }
        if (unlikely(expand_stack(vma, address))) {
                bad_area(regs, error_code, address);
                return;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        if (unlikely(access_error(error_code, vma))) {
                bad_area_access_error(regs, error_code, address);
                return;
        }

        set_thread_fault_code(error_code);

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (error_code & FAULT_CODE_WRITE)
                flags |= FAULT_FLAG_WRITE;

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags);

        if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
                if (mm_fault_error(regs, error_code, address, fault))
                        return;
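
        /*
         * Fault accounting: a major fault required I/O to resolve, a
         * minor one did not. This only runs on the first attempt, while
         * FAULT_FLAG_ALLOW_RETRY is still set.
         */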
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
}