// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/extable.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
extern void die(const char *str, struct pt_regs *regs, long err);
/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));

	do {
		pmd_t *pmd;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			pr_alert("(bad)");
			break;
		}

		pmd = pmd_offset(pgd, addr);
#if PTRS_PER_PMD != 1
		pr_alert(", *pmd=%08lx", pmd_val(*pmd));
#endif

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_alert("(bad)");
			break;
		}

		if (IS_ENABLED(CONFIG_HIGHMEM))
		{
			pte_t *pte;
			/* We must not map this if we have highmem enabled */
			pte = pte_offset_map(pmd, addr);
			pr_alert(", *pte=%08lx", pte_val(*pte));
			pte_unmap(pte);
		}
	} while (0);

	pr_alert("\n");
}
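
/*
 * Main page fault entry point.  'entry' identifies the exception that got
 * us here (ENTRY_PTE_NOT_PRESENT, ENTRY_TLB_MISC or ENTRY_GENERAL_EXCPETION),
 * 'addr' is the faulting virtual address, and 'error_code' carries the
 * relevant ITYPE bits (instruction-fetch flag and exception subtype).
 */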
void do_page_fault(unsigned long entry, unsigned long addr,
		   unsigned int error_code, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	vm_fault_t fault;
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
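	/*
	 * 'mask' is narrowed below to the specific VM_* permission this
	 * access needs; 'flags' steers handle_mm_fault() (allow retry,
	 * killable wait, plus FAULT_FLAG_WRITE for stores).
	 */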

	error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);

	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;
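	/*
	 * si_code starts out as "no mapping at all" (SEGV_MAPERR); it is
	 * upgraded to SEGV_ACCERR once a VMA is found but its permissions
	 * do not allow the access.
	 */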
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (addr >= TASK_SIZE) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;

		if (addr >= TASK_SIZE && addr < VMALLOC_END
		    && (entry == ENTRY_PTE_NOT_PRESENT))
			goto vmalloc_fault;
		else
			goto no_context;
	}
	/* Send a signal to the task for handling the unaligned access. */
	if (entry == ENTRY_GENERAL_EXCPETION
	    && error_code == ETYPE_ALIGNMENT_CHECK) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto no_context;
	}
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;
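	/*
	 * faulthandler_disabled() covers both atomic context and regions
	 * bracketed by pagefault_disable(), where sleeping on the fault
	 * would be illegal.
	 */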
	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if (!user_mode(regs) &&
		    !search_exception_tables(instruction_pointer(regs)))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
		if (IS_ENABLED(CONFIG_DEBUG_VM)) {
			if (!user_mode(regs) &&
			    !search_exception_tables(instruction_pointer(regs)))
				goto no_context;
		}
	}
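	/*
	 * Look up the VMA covering the faulting address (or the closest one
	 * above it, for the stack-growth case handled below).
	 */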
	vma = find_vma(mm, addr);

	if (unlikely(!vma))
		goto bad_area;

	if (vma->vm_start <= addr)
		goto good_area;

	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;

	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	si_code = SEGV_ACCERR;
	/* first do some preliminary protection checks */
	if (entry == ENTRY_PTE_NOT_PRESENT) {
		if (error_code & ITYPE_mskINST)
			mask = VM_EXEC;
		else {
			mask = VM_READ | VM_WRITE;
		}
	} else if (entry == ENTRY_TLB_MISC) {
		switch (error_code & ITYPE_mskETYPE) {
		case RD_PROT:
			mask = VM_READ;
			break;
		case WRT_PROT:
			mask = VM_WRITE;
			flags |= FAULT_FLAG_WRITE;
			break;
		case NOEXEC:
			mask = VM_EXEC;
			break;
		case PAGE_MODIFY:
			mask = VM_WRITE;
			flags |= FAULT_FLAG_WRITE;
			break;
		case ACC_BIT:
			BUG();
		default:
			break;
		}

	}
	if (!(vma->vm_flags & mask))
		goto bad_area;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(vma, addr, flags);
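	/*
	 * handle_mm_fault() returns a bitmask of VM_FAULT_* bits; the error
	 * and retry cases are picked apart below.
	 */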
	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else
			goto bad_area;
	}
	/*
	 * Major/minor page fault accounting is only done on the initial
	 * attempt. If we go through a retry, it is extremely likely that the
	 * page will be found in page cache at that point.
	 */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:

	/* User mode accesses just cause a SIGSEGV */

	if (user_mode(regs)) {
		tsk->thread.address = addr;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = entry;
		force_sig_fault(SIGSEGV, si_code, (void __user *)addr, tsk);
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  to the register, and returns.)
	 */
	{
		const struct exception_table_entry *entry;

		if ((entry =
		     search_exception_tables(instruction_pointer(regs))) !=
		    NULL) {
			/* Adjust the instruction pointer in the stackframe */
			instruction_pointer(regs) = entry->fixup;
			return;
		}
	}
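	/*
	 * No fixup entry exists for this faulting kernel address, so fall
	 * through to the fatal oops handling below.
	 */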
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, error_code);

	bust_spinlocks(0);
	do_exit(SIGKILL);

	return;
	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */

out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;
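	/*
	 * handle_mm_fault() reported VM_FAULT_SIGBUS (for example an access
	 * beyond the end of a mapped file); deliver SIGBUS to the task.
	 */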
do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	/*
	 * Send a sigbus
	 */
	tsk->thread.address = addr;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = entry;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr, tsk);

	return;
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in a misfortunately run irq
		 * (like inside schedule() between switch_mm and
		 *  switch_to...).
		 */

		unsigned int index = pgd_index(addr);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;

		pud = pud_offset(pgd, addr);
		pud_k = pud_offset(pgd_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;

		if (!pmd_present(*pmd))
			set_pmd(pmd, *pmd_k);
		else
			BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

		/*
		 * Since the vmalloc area is global, we don't
		 * need to copy individual PTE's, it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */

		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}