// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr,
				vm_fault_t fault)
{
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm,
				int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	p4d_t *p4d, *p4d_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int index;
	unsigned long pfn;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs))
		return do_trap(regs, SIGSEGV, code, addr);

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk->active_mm->pgd" here.
	 * We might be inside an interrupt in the middle
	 * of a task switch.
	 */
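	/*
	 * The root of the currently active page table is read from the satp
	 * CSR rather than from any mm pointer, and init_mm.pgd is the
	 * kernel's reference copy that the active table is synced against.
	 */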
	index = pgd_index(addr);
	pfn = csr_read(CSR_SATP) & SATP_PPN;
	pgd = (pgd_t *)pfn_to_virt(pfn) + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(*p4d_k)) {
		no_context(regs, addr);
		return;
	}

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * Since the vmalloc area is global, it is unnecessary
	 * to copy individual PTEs
	 */
	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	/*
	 * Make sure the actual PTE exists as well to
	 * catch kernel vmalloc-area accesses to non-mapped
	 * addresses. If we don't do this, this will just
	 * silently loop forever.
	 */
	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * The kernel assumes that TLBs don't cache invalid
	 * entries, but in RISC-V, SFENCE.VMA specifies an
	 * ordering constraint, not a cache flush; it is
	 * necessary even after writing invalid entries.
	 */
	local_flush_tlb_page(addr);
}

static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
{
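	/*
	 * The hardware exception cause tells us what kind of access faulted;
	 * reject the access if the VMA lacks the matching permission bit.
	 */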
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC)) {
			return true;
		}
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ)) {
			return true;
		}
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE)) {
			return true;
		}
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
	return false;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;
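
	/*
	 * The trap entry code saves the faulting virtual address in
	 * regs->badaddr and the exception cause in regs->cause.
	 */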
	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

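	/*
	 * Translate the RISC-V exception cause into the generic fault flags
	 * that handle_mm_fault() understands.
	 */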
	if (cause == EXC_STORE_PAGE_FAULT)
		flags |= FAULT_FLAG_WRITE;
	else if (cause == EXC_INST_PAGE_FAULT)
		flags |= FAULT_FLAG_INSTRUCTION;
retry:
	mmap_read_lock(mm);
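	/*
	 * With the mmap lock held for reading, find the VMA that covers the
	 * faulting address; a VM_GROWSDOWN stack VMA starting just above the
	 * address may still be grown down to cover it.
	 */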
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	if (unlikely(access_error(cause, vma))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);
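
	/*
	 * handle_mm_fault() is passed regs so that the core mm code can do
	 * the major/minor fault accounting itself.
	 */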

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}