// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/extable.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

extern void die(const char *str, struct pt_regs *regs, long err);

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
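
	/* Walk down through the (possibly folded) p4d/pud levels to the pmd. */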
	p4d = p4d_offset(pgd, addr);
	pud = pud_offset(p4d, addr);
	pmd = pmd_offset(pud, addr);
	pr_alert(", *pmd=%08lx", pmd_val(*pmd));

	if (IS_ENABLED(CONFIG_HIGHMEM)) {
		pte_t *pte;

		/* We must not map this if we have highmem enabled */
		pte = pte_offset_map(pmd, addr);
		pr_alert(", *pte=%08lx", pte_val(*pte));
		pte_unmap(pte);
	}
}

void do_page_fault(unsigned long entry, unsigned long addr,
		   unsigned int error_code, struct pt_regs *regs)
{
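	/*
	 * 'entry' identifies which exception vector brought us here
	 * (PTE not present, TLB misc, or a general exception), 'addr' is the
	 * faulting virtual address, and 'error_code' carries the raw ITYPE
	 * bits reported by the hardware.
	 */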
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	vm_fault_t fault;
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
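
	/* Only the instruction-fetch flag and the exception type matter here. */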
	error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);

	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (addr >= TASK_SIZE) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;

		if (addr >= TASK_SIZE && addr < VMALLOC_END
		    && (entry == ENTRY_PTE_NOT_PRESENT))
			goto vmalloc_fault;
		else
			goto no_context;
	}

	/* Send a signal to the task for handling the unaligned access. */
	if (entry == ENTRY_GENERAL_EXCPETION
	    && error_code == ETYPE_ALIGNMENT_CHECK) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto no_context;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if (!user_mode(regs) &&
		    !search_exception_tables(instruction_pointer(regs)))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
		if (IS_ENABLED(CONFIG_DEBUG_VM)) {
			if (!user_mode(regs) &&
			    !search_exception_tables(instruction_pointer(regs)))
				goto no_context;
		}
	}
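
	/* With mmap_sem now held, look up the VMA covering the faulting address. */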
	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;

	if (vma->vm_start <= addr)
		goto good_area;

	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;

	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	si_code = SEGV_ACCERR;
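
	/*
	 * A mapping exists for this address, so from here on a failure means
	 * the access rights were wrong, not that the mapping is missing.
	 */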

	/* first do some preliminary protection checks */
	if (entry == ENTRY_PTE_NOT_PRESENT) {
		if (error_code & ITYPE_mskINST)
			mask = VM_EXEC;
		else
			mask = VM_READ | VM_WRITE;
	} else if (entry == ENTRY_TLB_MISC) {
		switch (error_code & ITYPE_mskETYPE) {
			flags |= FAULT_FLAG_WRITE;
			flags |= FAULT_FLAG_WRITE;
		}
	}

	if (!(vma->vm_flags & mask))
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags);
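
	/* 'fault' now carries VM_FAULT_* result bits; the checks below act on them. */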

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else
			goto bad_area;
	}

	/*
	 * Major/minor page fault accounting is only done on the initial
	 * attempt. If we go through a retry, it is extremely likely that the
	 * page will be found in page cache at that point.
	 */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, addr);
		} else {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);
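
	/* Fall through: mmap_sem is no longer held from this point on. */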
bad_area_nosemaphore:

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.address = addr;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = entry;
		force_sig_fault(SIGSEGV, si_code, (void __user *)addr);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code)
	 */
	{
		const struct exception_table_entry *entry;

		if ((entry =
		     search_exception_tables(instruction_pointer(regs))) !=
		    NULL) {
			/* Adjust the instruction pointer in the stackframe */
			instruction_pointer(regs) = entry->fixup;
			return;
		}
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);
	die("Oops", regs, error_code);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	tsk->thread.address = addr;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = entry;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in an unfortunately timed irq
		 * (like inside schedule() between switch_mm and
		 * switch_to).
		 */
		unsigned int index = pgd_index(addr);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
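
		/*
		 * Fix up the page table the MMU is actually walking (read from
		 * the L1_PPTB system register) by copying the entry from the
		 * kernel reference table, init_mm.pgd.
		 */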
		pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;

		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;

		if (!pmd_present(*pmd))
			set_pmd(pmd, *pmd_k);
		else
			BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

		/*
		 * Since the vmalloc area is global, we don't
		 * need to copy individual PTE's, it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;
	}
}