// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 2000-2010 Axis Communications AB
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <arch/system.h>

extern int find_fixup_code(struct pt_regs *);
extern void die_if_kernel(const char *, struct pt_regs *, long);
extern void show_registers(struct pt_regs *regs);

/* debug of low-level TLB reload */
#undef DEBUG

#ifdef DEBUG
#define D(x) x
#else
#define D(x)
#endif

/* debug of higher-level faults */
#define DPG(x)

/* current active page directory */
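/*
 * Kept per CPU so the fault paths below can reach the active page
 * directory without dereferencing current->active_mm (see the
 * vmalloc_fault handling further down).
 */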
DEFINE_PER_CPU(pgd_t *, current_pgd);
unsigned long cris_signal_return_page;

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Notice that the address we're given is aligned to the page the fault
 * occurred in, since we only get the PFN in R_MMU_CAUSE not the complete
 * address.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *
 * If this routine detects a bad access, it returns 1, otherwise it
 * returns 0.
 */
asmlinkage void
do_page_fault(unsigned long address, struct pt_regs *regs,
	      int protection, int writeaccess)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	siginfo_t info;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

66 "Page fault for %lX on %X at %lX, prot %d write %d\n",
67 address
, smp_processor_id(), instruction_pointer(regs
),
68 protection
, writeaccess
));
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * NOTE2: This is done so that, when updating the vmalloc
	 * mappings we don't have to walk all processes' pgdirs and
	 * add the high mappings all at once. Instead we do it as they
	 * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
	 * bit set so sometimes the TLB can use a lingering entry.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection error (error_code & 1).
	 */

	if (address >= VMALLOC_START &&
	    !protection &&
	    !user_mode(regs))
		goto vmalloc_fault;

	/* When stack execution is not allowed we store the signal
	 * trampolines in the reserved cris_signal_return_page.
	 * Handle this in the exact same way as vmalloc (we know
	 * that the mapping is there and is valid so no need to
	 * call handle_mm_fault).
	 */
	if (cris_signal_return_page &&
	    address == cris_signal_return_page &&
	    !protection && user_mode(regs))
		goto vmalloc_fault;

	/* we can and should enable interrupts at this point */
	local_irq_enable();

	mm = tsk->mm;

	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt, have pagefaults disabled or have no
	 * user context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (user_mode(regs)) {
		/*
		 * accessing the stack below usp is always a bug.
		 * we get page-aligned addresses so we can only check
		 * if we're within a page from usp, but that might be
		 * enough to catch brutal errors at least.
		 */
		if (address + PAGE_SIZE < rdusp())
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	info.si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */
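	/*
	 * writeaccess encodes the access type: 2 means an instruction
	 * fetch, 1 a write, anything else a read (that is what the
	 * three branches below test for).
	 */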
	if (writeaccess == 2) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (writeaccess == 1) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(vma, address, flags);
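
	/*
	 * handle_mm_fault() returns a mask of VM_FAULT_* bits.  If it
	 * dropped mmap_sem for a retry and a fatal signal is pending,
	 * the task is being killed anyway, so just bail out.
	 */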
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
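
			/*
			 * With FAULT_FLAG_ALLOW_RETRY cleared and
			 * FAULT_FLAG_TRIED set, the second attempt will
			 * not drop mmap_sem and ask us to retry again.
			 */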
			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	DPG(show_registers(regs));

	/* User mode accesses just cause a SIGSEGV */

	if (user_mode(regs)) {
#ifdef CONFIG_NO_SEGFAULT_TERMINATION
		DECLARE_WAIT_QUEUE_HEAD(wq);
#endif
		printk(KERN_NOTICE "%s (pid %d) segfaults for page "
			"address %08lx at pc %08lx\n",
			tsk->comm, tsk->pid,
			address, instruction_pointer(regs));

		/* With DPG on, we've already dumped registers above. */
		DPG(if (0))
			show_registers(regs);

#ifdef CONFIG_NO_SEGFAULT_TERMINATION
		wait_event_interruptible(wq, 0 == 1);
#else
		info.si_signo = SIGSEGV;
		info.si_errno = 0;

		/* info.si_code has been set above */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
#endif
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code)
	 */
	if (find_fixup_code(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	if (!oops_in_progress) {
		oops_in_progress = 1;
		if ((unsigned long) (address) < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
				"pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel access"
				" at virtual address %08lx\n", address);
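
		/*
		 * Pack the error code the way the comment at the top of
		 * this file describes it: bit 1 = write access,
		 * bit 0 = protection fault.
		 */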
276 die_if_kernel("Oops", regs
, (writeaccess
<< 1) | protection
);
277 oops_in_progress
= 0;
	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */

out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in a misfortunately run irq
		 * (like inside schedule() between switch_mm and
		 *  switch_to...).
		 */

		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *)per_cpu(current_pgd, smp_processor_id()) + offset;
		pgd_k = init_mm.pgd + offset;

		/* Since we're two-level, we don't need to do both
		 * set_pgd and set_pmd (they do the same thing). If
		 * we go three-level at some point, do the right thing
		 * with pgd_present and set_pgd here.
		 *
		 * Also, since the vmalloc area is global, we don't
		 * need to copy individual PTEs, it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);

		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		set_pmd(pmd, *pmd_k);

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}

/* Find fixup code. */
int
find_fixup_code(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	/* in case of delay slot fault (v32) */
	unsigned long ip = (instruction_pointer(regs) & ~0x1);
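
	/*
	 * search_exception_tables() looks the address of the faulting
	 * instruction up in the kernel (and module) exception tables;
	 * a hit means the access came from a whitelisted spot, such as
	 * a user-copy helper, and can be fixed up instead of oopsing.
	 */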
	fixup = search_exception_tables(ip);
	if (fixup != 0) {
		/* Adjust the instruction pointer in the stackframe. */
		instruction_pointer(regs) = fixup->fixup;
		arch_fixup(regs);
		return 1;
	}

	return 0;
}