// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/sched/signal.h>
#include <linux/perf_event.h>

#include <linux/uaccess.h>
#include <asm/bug.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/signal.h>
#define NUM_TLB_ENTRIES 64
#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))
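/*
 * TLB_OFFSET() maps a virtual address to its set index in the 64-entry
 * TLB: the page frame number modulo NUM_TLB_ENTRIES.
 */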
/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
 *            - also look into include/asm/mmu_context.h
 */
volatile pgd_t *current_pgd[NR_CPUS];
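/*
 * current_pgd caches each CPU's active page directory; switch_mm() is
 * expected to keep it current so the low-level TLB-miss handling can find
 * the right table without dereferencing current->mm.
 */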
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
			      unsigned long vector, int write_acc);
/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * If it detects a bad access, it either delivers a signal (for user-mode
 * faults) or falls back to the kernel exception-fixup/oops paths.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
			      unsigned long vector, int write_acc)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	tsk = current;
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * NOTE2: This is done so that, when updating the vmalloc
	 * mappings we don't have to walk all processes' pgdirs and
	 * add the high mappings all at once. Instead we do it as they
	 * are used. However, vmalloc'ed page entries have the PAGE_GLOBAL
	 * bit set, so sometimes the TLB can use a lingering entry.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection error.
	 */
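	/*
	 * On OpenRISC, vectors 0x300 and 0x400 are the data and instruction
	 * page-fault exceptions (protection-style faults); a kernel vmalloc
	 * access that simply has no entry yet arrives via other vectors and
	 * is fixed up at vmalloc_fault by copying the entry from init_mm.
	 */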
	if (address >= VMALLOC_START &&
	    (vector != 0x300 && vector != 0x400) &&
	    !user_mode(regs))
		goto vmalloc_fault;
	/* If exceptions were enabled, we can reenable them here */
	if (user_mode(regs)) {
		/* Exception was in userspace: reenable interrupts */
		local_irq_enable();
		flags |= FAULT_FLAG_USER;
	} else {
		/* If exception was in a syscall, then IRQs may have
		 * been enabled or disabled.  If they were enabled,
		 * reenable them.
		 */
		if (regs->sr & (SPR_SR_IEE | SPR_SR_TEE))
			local_irq_enable();
	}
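	/*
	 * The SPR_SR_IEE/SPR_SR_TEE test above re-enables interrupts only if
	 * the interrupted kernel context had the external-interrupt or
	 * tick-timer exceptions enabled in the supervision register.
	 */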
	mm = tsk->mm;
	si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (user_mode(regs)) {
		/*
		 * accessing the stack below usp is always a bug.
		 * we get page-aligned addresses so we can only check
		 * if we're within a page from usp, but that might be
		 * enough to catch brutal errors at least.
		 */
		if (address + PAGE_SIZE < regs->sp)
			goto bad_area;
	}
	vma = expand_stack(mm, address);
	if (!vma)
		goto bad_area_nosemaphore;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	si_code = SEGV_ACCERR;
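	/*
	 * From here on the address is covered by a VMA, so any remaining
	 * failure is a permission problem: SEGV_ACCERR (invalid permissions)
	 * rather than SEGV_MAPERR (address not mapped).
	 */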
	/* first do some preliminary protection checks */
	if (write_acc) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
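	/*
	 * write_acc comes from the exception entry code and is non-zero when
	 * the faulting access was a write; the checks above therefore require
	 * VM_WRITE for writes and VM_READ or VM_EXEC otherwise.
	 */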
	/* are we trying to execute a non-executable area? */
	if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
		goto bad_area;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
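	/*
	 * handle_mm_fault() does the real work of resolving the fault and
	 * returns a mask of VM_FAULT_* bits; passing regs lets the core mm
	 * code account the fault against this task for perf.
	 */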
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}
	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	/* RGD modeled on Cris */
	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);
	return;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code.)
	 */
	{
		const struct exception_table_entry *entry;
		if ((entry = search_exception_tables(regs->pc)) != NULL) {
			/* Adjust the instruction pointer in the stackframe */
			regs->pc = entry->fixup;
			return;
		}
	}
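	/*
	 * No fixup entry means the faulting instruction was not a marked
	 * user access, so there is nothing to recover to: fall through and
	 * oops below.
	 */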
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if ((unsigned long)(address) < PAGE_SIZE)
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel access");
	printk(" at virtual address 0x%08lx\n", address);

	die("Oops", regs, write_acc);
	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;
do_sigbus:
	mmap_read_unlock(mm);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in an unfortunately timed irq
		 * (like inside schedule() between switch_mm and
		 *  switch_to...).
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		phx_warn("do_page_fault(): vmalloc_fault will not work, "
			 "since current_pgd assign a proper value somewhere\n"
			 "anyhow we don't need this at the moment\n");

		phx_mmu("vmalloc_fault");
		pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;
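		/*
		 * pgd points at this CPU's active top-level table (cached in
		 * current_pgd), pgd_k at the kernel master table in init_mm;
		 * the missing vmalloc entry is copied from the latter into
		 * the former below.
		 */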
		/* Since we're two-level, we don't need to do both
		 * set_pgd and set_pmd (they do the same thing). If
		 * we go three-level at some point, do the right thing
		 * with pgd_present and set_pgd here.
		 *
		 * Also, since the vmalloc area is global, we don't
		 * need to copy individual PTEs; it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */
		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d_k))
			goto bad_area_nosemaphore;
		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud_k))
			goto bad_area_nosemaphore;
		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);

		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
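		/*
		 * Only the kernel-side entries (p4d_k/pud_k/pmd_k) are
		 * checked for presence: if init_mm has no mapping for this
		 * address there is nothing to sync, and the access is
		 * treated as a bad area.
		 */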
		set_pmd(pmd, *pmd_k);
		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}