// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"
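/* ../kernel/head.h provides the prototype for do_page_fault() below. */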
/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;
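	/*
	 * tsk->mm is NULL for kernel threads; their faults are sent
	 * down the no_context path below.
	 */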
	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
		goto vmalloc_fault;
	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();
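	/*
	 * SR_PIE is the "previous interrupt enable" bit in the saved
	 * status CSR: it records whether interrupts were on in the
	 * context that took this trap.
	 */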
	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
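	/*
	 * handle_mm_fault() below may ask us to retry; in that case we
	 * come back to this label with FAULT_FLAG_TRIED set.
	 */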
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;
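	/*
	 * find_vma() returns the first VMA ending above addr; it only
	 * covers addr if vma->vm_start <= addr. Otherwise addr may
	 * still fall within reach of a grows-down stack VMA.
	 */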
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;
	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
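	/*
	 * Past good_area, code is SEGV_ACCERR: a mapping exists but its
	 * permissions forbid the access, as opposed to the default
	 * SEGV_MAPERR for an unmapped address.
	 */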
	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags);
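	/*
	 * handle_mm_fault() returns a VM_FAULT_* bitmask; since
	 * FAULT_FLAG_ALLOW_RETRY is set it may also drop mmap_sem and
	 * report VM_FAULT_RETRY instead of completing the fault.
	 */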
	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
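	/* Any error bit other than OOM or SIGBUS is unexpected here. */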
	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}
	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}
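	/*
	 * Kernel-mode faults fall through to no_context: either an
	 * exception-table fixup recovers, or we oops.
	 */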
no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;
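	/*
	 * fixup_exception() looks up the faulting instruction in the
	 * exception table and, on a hit, redirects the saved pc to the
	 * registered fixup (e.g. for faulting uaccess helpers).
	 */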
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;
do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	do_trap(regs, SIGBUS, BUS_ADRERR, addr);
	return;
vmalloc_fault:
	{
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		p4d_t *p4d, *p4d_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		int index;

		/* User mode accesses just cause a SIGSEGV */
		if (user_mode(regs))
			return do_trap(regs, SIGSEGV, code, addr);
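		/*
		 * Lazy vmalloc sync: entries added to init_mm.pgd after
		 * this task's root table was built are copied in here,
		 * one top-level slot at a time.
		 */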
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk->active_mm->pgd" here.
		 * We might be inside an interrupt in the middle
		 * of a task switch.
		 */
		index = pgd_index(addr);
		pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
		pgd_k = init_mm.pgd + index;
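		/*
		 * satp holds the PFN of the active root page table;
		 * pfn_to_virt() maps it back to a kernel virtual address
		 * so the table's entries can be written directly.
		 */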
		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);
		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;
		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;
		/*
		 * Since the vmalloc area is global, it is unnecessary
		 * to copy individual PTEs
		 */
		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);
		/*
		 * Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;
		/*
		 * The kernel assumes that TLBs don't cache invalid
		 * entries, but in RISC-V, SFENCE.VMA specifies an
		 * ordering constraint, not a cache flush; it is
		 * necessary even after writing invalid entries.
		 */
		local_flush_tlb_page(addr);
	}
}