// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/extable.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

extern void die(const char *str, struct pt_regs *regs, long err);
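/* die() is provided by the arch trap-handling code (arch/nds32/kernel/traps.c). */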

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;

        if (!mm)
                mm = &init_mm;

        pr_alert("pgd = %p\n", mm->pgd);
        pgd = pgd_offset(mm, addr);
        pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
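
        /*
         * Walk the lower levels inside a do { ... } while (0) block so
         * we can break out at the first missing or bad entry.
         */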
        do {
                pmd_t *pmd;

                if (pgd_none(*pgd))
                        break;

                if (pgd_bad(*pgd)) {
                        pr_alert("(bad)");
                        break;
                }

                pmd = pmd_offset(pgd, addr);
#if PTRS_PER_PMD != 1
                pr_alert(", *pmd=%08lx", pmd_val(*pmd));
#endif

                if (pmd_none(*pmd))
                        break;

                if (pmd_bad(*pmd)) {
                        pr_alert("(bad)");
                        break;
                }

                if (IS_ENABLED(CONFIG_HIGHMEM)) {
                        pte_t *pte;
                        /* We must not map this if we have highmem enabled */
                        pte = pte_offset_map(pmd, addr);
                        pr_alert(", *pte=%08lx", pte_val(*pte));
                        pte_unmap(pte);
                }
        } while (0);

        pr_alert("\n");
}
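
/*
 * Entry point for MMU exceptions. 'entry' is the exception vector that
 * fired (e.g. ENTRY_PTE_NOT_PRESENT or ENTRY_TLB_MISC) and 'error_code'
 * carries the ITYPE instruction/exception-type bits, both as passed in
 * by the low-level exception entry code.
 */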
void do_page_fault(unsigned long entry, unsigned long addr,
                   unsigned int error_code, struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        siginfo_t info;
        int fault;
        unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
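
        /*
         * Keep only the instruction-fetch (ITYPE_mskINST) and
         * exception-type (ITYPE_mskETYPE) fields of the error code;
         * the protection checks below look only at these bits.
         */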
        error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
        tsk = current;
        mm = tsk->mm;
        info.si_code = SEGV_MAPERR;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (addr >= TASK_SIZE) {
                if (user_mode(regs))
                        goto bad_area_nosemaphore;

                if (addr >= TASK_SIZE && addr < VMALLOC_END
                    && (entry == ENTRY_PTE_NOT_PRESENT))
                        goto vmalloc_fault;
                else
                        goto no_context;
        }

        /* Send a signal to the task for handling the unaligned access. */
        if (entry == ENTRY_GENERAL_EXCPETION
            && error_code == ETYPE_ALIGNMENT_CHECK) {
                if (user_mode(regs))
                        goto bad_area_nosemaphore;
                else
                        goto no_context;
        }

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (unlikely(faulthandler_disabled() || !mm))
                goto no_context;

        /*
         * As per x86, we may deadlock here. However, since the kernel only
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
        if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
                if (!user_mode(regs) &&
                    !search_exception_tables(instruction_pointer(regs)))
                        goto no_context;
retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in which
                 * case, we'll have missed the might_sleep() from down_read().
                 */
                might_sleep();
                if (IS_ENABLED(CONFIG_DEBUG_VM)) {
                        if (!user_mode(regs) &&
                            !search_exception_tables(instruction_pointer(regs)))
                                goto no_context;
                }
        }
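
        /*
         * With mmap_sem held we can now look up a VMA that covers the
         * faulting address, or grow the stack down to reach it.
         */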
        vma = find_vma(mm, addr);

        if (unlikely(!vma))
                goto bad_area;

        if (vma->vm_start <= addr)
                goto good_area;

        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
                goto bad_area;

        if (unlikely(expand_stack(vma, addr)))
                goto bad_area;

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */

good_area:
        info.si_code = SEGV_ACCERR;

        /* first do some preliminary protection checks */
        if (entry == ENTRY_PTE_NOT_PRESENT) {
                if (error_code & ITYPE_mskINST)
                        mask = VM_EXEC;
                else {
                        mask = VM_READ | VM_WRITE;
                        if (vma->vm_flags & VM_WRITE)
                                flags |= FAULT_FLAG_WRITE;
                }
        } else if (entry == ENTRY_TLB_MISC) {
                switch (error_code & ITYPE_mskETYPE) {
                case RD_PROT:
                        mask = VM_READ;
                        break;
                case WRT_PROT:
                        mask = VM_WRITE;
                        flags |= FAULT_FLAG_WRITE;
                        break;
                case NOEXEC:
                        mask = VM_EXEC;
                        break;
                case PAGE_MODIFY:
                        mask = VM_WRITE;
                        flags |= FAULT_FLAG_WRITE;
                        break;
                case ACC_BIT:
                        BUG();
                default:
                        break;
                }
        }
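
        /* The VMA must grant at least one of the rights collected in 'mask'. */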
        if (!(vma->vm_flags & mask))
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, addr, flags);

        /*
         * If we need to retry but a fatal signal is pending, handle the
         * signal first. We do not need to release the mmap_sem because it
         * would already be released in __lock_page_or_retry in mm/filemap.c.
         */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
                if (!user_mode(regs))
                        goto no_context;
                return;
        }

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                else
                        goto bad_area;
        }

        /*
         * Major/minor page fault accounting is only done on the initial
         * attempt. If we go through a retry, it is extremely likely that the
         * page will be found in page cache at that point.
         */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        tsk->maj_flt++;
                else
                        tsk->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /* No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:

        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                tsk->thread.address = addr;
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = entry;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void *)addr;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:

        /* Are we prepared to handle this kernel fault?
         *
         * (The kernel has valid exception-points in the source
         *  when it accesses user-memory. When it fails in one
         *  of those points, we find it in a table and do a jump
         *  to some fixup code that loads an appropriate error
         *  code)
         */
        {
                const struct exception_table_entry *entry;

                if ((entry =
                     search_exception_tables(instruction_pointer(regs))) !=
                    NULL) {
                        /* Adjust the instruction pointer in the stackframe */
                        instruction_pointer(regs) = entry->fixup;
                        return;
                }
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);
        pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
                 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
                 "paging request", addr);

        show_pte(mm, addr);
        die("Oops", regs, error_code);
        bust_spinlocks(0);
        do_exit(SIGKILL);

        return;

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;

do_sigbus:
        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;

        /*
         * Send a sigbus
         */
        tsk->thread.address = addr;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = entry;
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *)addr;
        force_sig_info(SIGBUS, &info, tsk);

        return;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Use current_pgd instead of tsk->active_mm->pgd
                 * since the latter might be unavailable if this
                 * code is executed in a misfortunately run irq
                 * (like inside schedule() between switch_mm and
                 *  switch_to...).
                 */

                unsigned int index = pgd_index(addr);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;
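
                /*
                 * The L1_PPTB system register holds the physical base
                 * address of the page table the MMU is currently walking,
                 * so __va() of it yields the active top-level pgd.
                 */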
                pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
                pgd_k = init_mm.pgd + index;

                if (!pgd_present(*pgd_k))
                        goto no_context;

                pud = pud_offset(pgd, addr);
                pud_k = pud_offset(pgd_k, addr);
                if (!pud_present(*pud_k))
                        goto no_context;

                pmd = pmd_offset(pud, addr);
                pmd_k = pmd_offset(pud_k, addr);
                if (!pmd_present(*pmd_k))
                        goto no_context;

                if (!pmd_present(*pmd))
                        set_pmd(pmd, *pmd_k);
                else
                        BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

                /*
                 * Since the vmalloc area is global, we don't
                 * need to copy individual PTE's, it is enough to
                 * copy the pgd pointer into the pte page of the
                 * root task. If that is there, we'll find our pte if
                 * it exists.
                 */

                /* Make sure the actual PTE exists as well to
                 * catch kernel vmalloc-area accesses to non-mapped
                 * addresses. If we don't do this, this will just
                 * silently loop forever.
                 */
                pte_k = pte_offset_kernel(pmd_k, addr);
                if (!pte_present(*pte_k))
                        goto no_context;

                return;
        }
}