/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

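/*
 * Deliver the fault signal (si_signo/si_code) for 'address' to the
 * current task.
 */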
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address)
{
        force_sig_fault(si_signo, si_code, (void __user *)address);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;

        if (mm) {
                pgd = mm->pgd;
        } else {
                pgd = get_TTB();

                if (unlikely(!pgd))
                        pgd = swapper_pg_dir;
        }

        pr_alert("pgd = %p\n", pgd);
        pgd += pgd_index(addr);
        pr_alert("[%08lx] *pgd=%0*llx", addr, (u32)(sizeof(*pgd) * 2),
                 (u64)pgd_val(*pgd));

        do {
                p4d_t *p4d;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                if (pgd_none(*pgd))
                        break;

                if (pgd_bad(*pgd)) {
                        pr_cont("(bad)");
                        break;
                }

                p4d = p4d_offset(pgd, addr);
                if (PTRS_PER_P4D != 1)
                        pr_cont(", *p4d=%0*Lx", (u32)(sizeof(*p4d) * 2),
                                (u64)p4d_val(*p4d));

                if (p4d_none(*p4d))
                        break;

                if (p4d_bad(*p4d)) {
                        pr_cont("(bad)");
                        break;
                }

                pud = pud_offset(p4d, addr);
                if (PTRS_PER_PUD != 1)
                        pr_cont(", *pud=%0*llx", (u32)(sizeof(*pud) * 2),
                                (u64)pud_val(*pud));

                if (pud_none(*pud))
                        break;

                if (pud_bad(*pud)) {
                        pr_cont("(bad)");
                        break;
                }

                pmd = pmd_offset(pud, addr);
                if (PTRS_PER_PMD != 1)
                        pr_cont(", *pmd=%0*llx", (u32)(sizeof(*pmd) * 2),
                                (u64)pmd_val(*pmd));

                if (pmd_none(*pmd))
                        break;

                if (pmd_bad(*pmd)) {
                        pr_cont("(bad)");
                        break;
                }

                /* We must not map this if we have highmem enabled */
                if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
                        break;

                pte = pte_offset_kernel(pmd, addr);
                pr_cont(", *pte=%0*llx", (u32)(sizeof(*pte) * 2),
                        (u64)pte_val(*pte));
        } while (0);

        pr_cont("\n");
}

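/*
 * Synchronise the pgd/p4d/pud/pmd chain covering 'address' from the
 * reference page table (init_mm.pgd) into the page table rooted at
 * 'pgd'. Returns the kernel pmd entry on success, or NULL when the
 * kernel tables have no mapping either (or are already in sync), in
 * which case the fault cannot be fixed up here.
 */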
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
        unsigned index = pgd_index(address);
        pgd_t *pgd_k;
        p4d_t *p4d, *p4d_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd += index;
        pgd_k = init_mm.pgd + index;

        if (!pgd_present(*pgd_k))
                return NULL;

        p4d = p4d_offset(pgd, address);
        p4d_k = p4d_offset(pgd_k, address);
        if (!p4d_present(*p4d_k))
                return NULL;

        pud = pud_offset(p4d, address);
        pud_k = pud_offset(p4d_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        if (!pud_present(*pud))
                set_pud(pud, *pud_k);

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;

        if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);
        else {
                /*
                 * The page tables are fully synchronised so there must
                 * be another reason for the fault. Return NULL here to
                 * signal that we have not taken care of the fault.
                 */
                BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
                return NULL;
        }

        return pmd_k;
}

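/*
 * Upper bound of the range that vmalloc_fault() will try to repair.
 * With CONFIG_SH_STORE_QUEUES the limit is raised to P3_ADDR_MAX so
 * that (presumably) faults on store queue mappings, which sit beyond
 * the regular vmalloc area, are resolved here as well; otherwise
 * VMALLOC_END is sufficient.
 */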
#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
        pgd_t *pgd_k;
        pmd_t *pmd_k;
        pte_t *pte_k;

        /* Make sure we are in vmalloc/module/P3 area: */
        if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
                return -1;

        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         *
         * Do _not_ use "current" here. We might be inside
         * an interrupt in the middle of a task switch..
         */
        pgd_k = get_TTB();
        pmd_k = vmalloc_sync_one(pgd_k, address);
        if (!pmd_k)
                return -1;

        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
                return -1;

        return 0;
}

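/*
 * Print the oops banner for an unhandled kernel fault: whether it was a
 * NULL pointer dereference or a bad paging request, the faulting PC,
 * and a walk of the page tables for the faulting address.
 */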
static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
        if (!oops_may_print())
                return;

        pr_alert("BUG: unable to handle kernel %s at %08lx\n",
                 address < PAGE_SIZE ? "NULL pointer dereference"
                                     : "paging request",
                 address);
        pr_alert("PC:");
        printk_address(regs->pc, 1);

        show_pte(NULL, address);
}

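/*
 * A fault with no user context to return to ends up here: try an
 * exception table fixup first, then trapped I/O emulation, and oops
 * if neither can recover.
 */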
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
           unsigned long address)
{
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        if (handle_trapped_io(regs, address))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        show_fault_oops(regs, address);

        die("Oops", regs, error_code);
}

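/*
 * The *_nosemaphore variants are for callers that do not hold (or have
 * already dropped) mmap_lock: raise SIGSEGV for user-mode accesses,
 * otherwise hand the fault to no_context().
 */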
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                       unsigned long address, int si_code)
{
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                /*
                 * It's possible to have interrupts off here:
                 */
                local_irq_enable();

                force_sig_info_fault(SIGSEGV, si_code, address);

                return;
        }

        no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                     unsigned long address)
{
        __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
           unsigned long address, int si_code)
{
        struct mm_struct *mm = current->mm;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
        mmap_read_unlock(mm);

        __bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
        __bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
                      unsigned long address)
{
        __bad_area(regs, error_code, address, SEGV_ACCERR);
}

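/*
 * Called with mmap_lock held: drop it, then raise SIGBUS for a
 * user-mode access or treat a kernel-mode fault as an oops via
 * no_context().
 */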
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;

        mmap_read_unlock(mm);

        /* Kernel mode? Handle exceptions or die: */
        if (!user_mode(regs))
                no_context(regs, error_code, address);

        force_sig_info_fault(SIGBUS, BUS_ADRERR, address);
}

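/*
 * Post-process an error or signal result from handle_mm_fault().
 * Returns 1 if the fault has been fully dealt with and the caller
 * should bail out, 0 if normal completion/retry handling should
 * continue.
 */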
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
               unsigned long address, vm_fault_t fault)
{
        /*
         * Pagefault was interrupted by SIGKILL. We have no reason to
         * continue pagefault.
         */
        if (fault_signal_pending(fault, regs)) {
                if (!user_mode(regs))
                        no_context(regs, error_code, address);
                return 1;
        }

        /* Release mmap_lock first if necessary */
        if (!(fault & VM_FAULT_RETRY))
                mmap_read_unlock(current->mm);

        if (!(fault & VM_FAULT_ERROR))
                return 0;

        if (fault & VM_FAULT_OOM) {
                /* Kernel mode? Handle exceptions or die: */
                if (!user_mode(regs)) {
                        no_context(regs, error_code, address);
                        return 1;
                }

                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we
                 * got oom-killed):
                 */
                pagefault_out_of_memory();
        } else {
                if (fault & VM_FAULT_SIGBUS)
                        do_sigbus(regs, error_code, address);
                else if (fault & VM_FAULT_SIGSEGV)
                        bad_area(regs, error_code, address);
                else
                        BUG();
        }

        return 1;
}

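/*
 * Check the fault code against the VMA permissions: writes need
 * VM_WRITE, instruction fetches (ITLB misses) need VM_EXEC, and plain
 * reads need the VMA to be accessible at all. Non-zero means the
 * access is not allowed.
 */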
static inline int access_error(int error_code, struct vm_area_struct *vma)
{
        if (error_code & FAULT_CODE_WRITE) {
                /* write, present and write, not present: */
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
                        return 1;
                return 0;
        }

        /* ITLB miss on NX page */
        if (unlikely((error_code & FAULT_CODE_ITLB) &&
                     !(vma->vm_flags & VM_EXEC)))
                return 1;

        /* read, not present: */
        if (unlikely(!vma_is_accessible(vma)))
                return 1;

        return 0;
}

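/* User space ends at TASK_SIZE; anything at or above it is kernel space. */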
static int fault_in_kernel_space(unsigned long address)
{
        return address >= TASK_SIZE;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                                        unsigned long error_code,
                                        unsigned long address)
{
        unsigned long vec;
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;

        tsk = current;
        mm = tsk->mm;
        vec = lookup_exception_vector();

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (unlikely(fault_in_kernel_space(address))) {
                if (vmalloc_fault(address) >= 0)
                        return;
                if (kprobe_page_fault(regs, vec))
                        return;

                bad_area_nosemaphore(regs, error_code, address);
                return;
        }

        if (unlikely(kprobe_page_fault(regs, vec)))
                return;

        /* Only enable interrupts if they were on before the fault */
        if ((regs->sr & SR_IMASK) != SR_IMASK)
                local_irq_enable();

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        /*
         * If we're in an interrupt, have no user context or are running
         * with pagefaults disabled then we must not take the fault:
         */
        if (unlikely(faulthandler_disabled() || !mm)) {
                bad_area_nosemaphore(regs, error_code, address);
                return;
        }

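        /*
         * lock_mm_and_find_vma() takes mmap_lock for read and looks up
         * (and, where allowed, expands) the VMA covering the faulting
         * address. On failure it returns NULL with the lock already
         * dropped, which is why the error path below uses the
         * _nosemaphore variant.
         */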
retry:
        vma = lock_mm_and_find_vma(mm, address, regs);
        if (unlikely(!vma)) {
                bad_area_nosemaphore(regs, error_code, address);
                return;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
        if (unlikely(access_error(error_code, vma))) {
                bad_area_access_error(regs, error_code, address);
                return;
        }

        set_thread_fault_code(error_code);

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (error_code & FAULT_CODE_WRITE)
                flags |= FAULT_FLAG_WRITE;

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags, regs);

        if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
                if (mm_fault_error(regs, error_code, address, fault))
                        return;

        /* The fault is fully completed (including releasing mmap lock) */
        if (fault & VM_FAULT_COMPLETED)
                return;

        if (fault & VM_FAULT_RETRY) {
                flags |= FAULT_FLAG_TRIED;

                /*
                 * No need to mmap_read_unlock(mm) as we would
                 * have already released it in __lock_page_or_retry
                 * in mm/filemap.c.
                 */
                goto retry;
        }

        mmap_read_unlock(mm);
}