// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */

#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
/*
 * Decode of hardware exception sends us to one of several
 * entry points.  At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */
#define FLT_IFETCH     -1
#define FLT_LOAD        0
#define FLT_STORE       1

/*
 * Canonical page fault handler
 */
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int si_signo;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	const struct exception_table_entry *fixup;
	unsigned int flags = FAULT_FLAG_DEFAULT;
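	/*
	 * Note: FAULT_FLAG_DEFAULT expands to FAULT_FLAG_ALLOW_RETRY |
	 * FAULT_FLAG_KILLABLE | FAULT_FLAG_INTERRUPTIBLE in this kernel,
	 * so the fault starts out retryable and killable.
	 */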
	/*
	 * If we're in an interrupt or have no user context,
	 * then must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

	local_irq_enable();

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
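	/*
	 * find_vma() returns the first VMA whose vm_end lies above the
	 * faulting address, so the address may still fall below vm_start;
	 * the checks below separate a true hit from a candidate for stack
	 * expansion.
	 */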
	if (vma->vm_start <= address)
		goto good_area;
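	/*
	 * The address sits in the gap below this VMA.  That is only OK if
	 * the VMA is a grows-down stack segment that can be expanded to
	 * cover the faulting address.
	 */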
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	/* Address space is OK.  Now check access rights. */
	si_code = SEGV_ACCERR;

	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}
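	/*
	 * handle_mm_fault() does the real work: it fills in the page tables
	 * and brings the page in, or returns a VM_FAULT_* error code.
	 * Passing regs lets the core mm code account the fault (including
	 * the perf major/minor fault events) on our behalf.
	 */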
	fault = handle_mm_fault(vma, address, flags, regs);

	/*
	 * A signal interrupted the fault before it could complete; the
	 * mmap lock was already dropped on that path, so just return.
	 */
	if (fault_signal_pending(fault, regs))
		return;
	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			if (fault & VM_FAULT_RETRY) {
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		mmap_read_unlock(mm);
		return;
	}

	mmap_read_unlock(mm);
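	/*
	 * handle_mm_fault() reported an error; decide which signal, if any,
	 * to deliver for it.
	 */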
	/* Handle copyin/out exception cases */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	/* User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		si_signo = SIGBUS;
		si_code = BUS_ADRERR;
	}
	/* Address is not in the memory map */
	else {
		si_signo = SIGSEGV;
		si_code = SEGV_ACCERR;
	}
	force_sig_fault(si_signo, si_code, (void __user *)address);
	return;
bad_area:
	mmap_read_unlock(mm);

	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}
	/* Kernel-mode fault falls through */
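	/*
	 * no_context: fault in kernel mode.  If the faulting instruction
	 * has an exception-table entry (e.g. a user access in the uaccess
	 * helpers), point the ELR at its fixup handler instead of dying.
	 */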
no_context:
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}

	/* Things are looking very, very bad now */
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
	       "virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}
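/*
 * Entry points from the hardware exception decode: each recovers the
 * faulting virtual address from the saved machine state and funnels it
 * into the canonical handler with the matching cause code.
 */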
void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}