// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */
/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
/*
 * Decode of hardware exception sends us to one of several
 * entry points.  At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */
#define FLT_IFETCH     -1
#define FLT_LOAD        0
#define FLT_STORE       1
/*
 * Canonical page fault handler
 */
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int si_signo;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	const struct exception_table_entry *fixup;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	/*
	 * If we're in an interrupt or have no user context,
	 * then must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

	local_irq_enable();
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
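	/*
	 * Walk the VMA tree under the read side of mmap_sem; the retry
	 * label lets us come back around after handle_mm_fault() has
	 * dropped the lock to wait on I/O.
	 */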
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
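	/*
	 * find_vma() returns the first VMA ending above address, so the
	 * fault may still lie below vma->vm_start; that is only legal
	 * when the VMA is a stack that can grow down to cover it.
	 */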
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	/* Address space is OK.  Now check access rights. */
	si_code = SEGV_ACCERR;
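	/*
	 * Match the hardware fault cause against the VMA's permission
	 * bits; a disallowed access raises SIGSEGV with SEGV_ACCERR.
	 */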
	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}
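	/*
	 * Hand the fault to the generic MM code; it may sleep, drop
	 * mmap_sem, or ask us to come back via VM_FAULT_RETRY.
	 */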
	fault = handle_mm_fault(vma, address, flags);
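	/*
	 * A fatal signal interrupted the fault; fault_signal_pending()
	 * only reports RETRY faults, for which handle_mm_fault() has
	 * already dropped mmap_sem, so there is nothing to unlock here.
	 */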
	if (fault_signal_pending(fault, regs))
		return;
	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
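			/*
			 * Account the fault as major or minor, depending
			 * on whether it had to wait for I/O.
			 */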
			if (fault & VM_FAULT_MAJOR)
				current->maj_flt++;
			else
				current->min_flt++;
			if (fault & VM_FAULT_RETRY) {
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}
		up_read(&mm->mmap_sem);
		return;
	}
	up_read(&mm->mmap_sem);
	/* Handle copyin/out exception cases */
	if (!user_mode(regs))
		goto no_context;
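	/*
	 * Defer to the global OOM machinery rather than killing the
	 * current task on the spot.
	 */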
	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}
	/* User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		si_signo = SIGBUS;
		si_code = BUS_ADRERR;
	}
	/* Address is not in the memory map */
	else {
		si_signo = SIGSEGV;
		si_code = SEGV_ACCERR;
	}
	force_sig_fault(si_signo, si_code, (void __user *)address);
	return;

bad_area:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}
	/* Kernel-mode fault falls through */

no_context:
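	/*
	 * Faults in uaccess copy routines have exception-table fixup
	 * entries; redirect the saved return address to the fixup
	 * handler instead of oopsing.
	 */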
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}
	/* Things are looking very, very bad now */
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
		"virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}
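/*
 * Hardware exception decode entry points: each recovers the faulting
 * virtual address from the saved registers and funnels it into the
 * canonical handler with the matching cause code.
 */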
void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}
void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}
void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}