// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/fault.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/pgalloc.h>
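
/*
 * die_if_kernel() is provided by the m68k traps code: when called from
 * kernel mode it prints an oops and kills the current task.
 */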
extern void die_if_kernel(char *, struct pt_regs *, long);

int send_fault_sig(struct pt_regs *regs)
{
	int signo, si_code;
	void __user *addr;

	signo = current->thread.signo;
	si_code = current->thread.code;
	addr = (void __user *)current->thread.faddr;
	pr_debug("send_fault_sig: %p,%d,%d\n", addr, signo, si_code);

	if (user_mode(regs)) {
		force_sig_fault(signo, si_code, addr);
	} else {
		if (fixup_exception(regs))
			return -1;

		//if (signo == SIGBUS)
		//	force_sig_fault(si_signo, si_code, addr);

		/*
		 * Oops. The kernel tried to access some bad page. We'll have to
		 * terminate things with extreme prejudice.
		 */
		if ((unsigned long)addr < PAGE_SIZE)
			pr_alert("Unable to handle kernel NULL pointer dereference");
		else
			pr_alert("Unable to handle kernel access");
		pr_cont(" at virtual address %p\n", addr);
		die_if_kernel("Oops", regs, 0 /*error_code*/);
		do_exit(SIGKILL);
	}

	return 1;
}

/*
 * This routine handles page faults.  It determines the problem, and
 * then passes it off to one of the appropriate routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
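 *
 *	e.g. error_code == 2 is a write to a page that is not present,
 *	and error_code == 1 is a read from a present page, i.e. a
 *	protection fault.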
 *
 * If this routine detects a bad access, it returns 1, otherwise it
 * returns 0.
 */
int do_page_fault(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
		regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
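
	/*
	 * FAULT_FLAG_USER tells the generic mm code that the fault happened
	 * while the CPU was in user mode, so it is handled and accounted as
	 * a user-space fault.
	 */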

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto map_err;
	if (vma->vm_flags & VM_IO)
		goto acc_err;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto map_err;
	if (user_mode(regs)) {
		/* Accessing the stack below usp is always a bug.  The
		   "+ 256" is there due to some instructions doing
		   pre-decrement on the stack and that doesn't show up
		   until later.  */
		if (address + 256 < rdusp())
			goto map_err;
	}
	if (expand_stack(vma, address))
		goto map_err;
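
	/*
	 * If expand_stack() succeeded, the stack vma was grown downwards so
	 * that it now covers the faulting address, and we fall through to
	 * good_area below.
	 */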

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	pr_debug("do_page_fault: good_area\n");
	switch (error_code & 3) {
	default:	/* 3: write, present */
		/* fall through */
	case 2:		/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto acc_err;
		flags |= FAULT_FLAG_WRITE;
		break;
	case 1:		/* read, present */
		goto acc_err;
	case 0:		/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto acc_err;
	}
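
	/*
	 * The access type has been checked against the vma permissions;
	 * ask the generic mm code to resolve the fault.
	 */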

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	pr_debug("handle_mm_fault returns %x\n", fault);
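
	/*
	 * handle_mm_fault() returns a bitmask: VM_FAULT_ERROR covers the
	 * failure cases (OOM, SIGSEGV, SIGBUS), VM_FAULT_MAJOR means I/O was
	 * needed to bring the page in, and VM_FAULT_RETRY means mmap_sem was
	 * dropped and the fault has to be retried.
	 */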

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto map_err;
		else if (fault & VM_FAULT_SIGBUS)
			goto bus_err;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return 0;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return 0;
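
/*
 * The labels below record the signal details in current->thread and then
 * hand off to send_fault_sig() to report the fault.
 */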

no_context:
	current->thread.signo = SIGBUS;
	current->thread.faddr = address;
	return send_fault_sig(regs);

bus_err:
	current->thread.signo = SIGBUS;
	current->thread.code = BUS_ADRERR;
	current->thread.faddr = address;
	goto send_sig;

map_err:
	current->thread.signo = SIGSEGV;
	current->thread.code = SEGV_MAPERR;
	current->thread.faddr = address;
	goto send_sig;

acc_err:
	current->thread.signo = SIGSEGV;
	current->thread.code = SEGV_ACCERR;
	current->thread.faddr = address;

send_sig:
	up_read(&mm->mmap_sem);
	return send_fault_sig(regs);
}