arch/sparc/mm/fault_32.c
// SPDX-License-Identifier: GPL-2.0
/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>

#include "mm_32.h"
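/*
 * Non-zero (the default) lets otherwise-unhandled user faults print the
 * rate-limited diagnostic in show_signal_msg() below.
 */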
int show_unhandled_signals = 1;
static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
		    (unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
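	/*
	 * Decoding note for the tests below (sparc32 format-3 memory
	 * ops): bit 21 of the instruction is op3 bit 2, which is set
	 * for stores and clear for loads; op3 == 0x0f is SWAP, which
	 * both reads and writes memory.
	 */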
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}
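	/*
	 * No usable fixup: fabricate just enough of a pt_regs (pc, npc
	 * and the current %psr) for unhandled_fault() to print an oops.
	 */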
	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info(sig, &info, current);
}
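/*
 * Work out the address to report in siginfo: a text fault faulted on
 * the pc itself; for a data fault, fetch the faulting instruction
 * (via __get_user() when the pc is in user space) and decode its
 * effective address.
 */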
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
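	/* On an instruction-access fault, the pc is the fault address. */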
	if (text_fault)
		address = regs->pc;
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (pagefault_disabled() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	down_read(&mm->mmap_sem);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	if (from_user)
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
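	/*
	 * If handle_mm_fault() returned VM_FAULT_RETRY it has already
	 * dropped mmap_sem, so with a fatal signal pending we can simply
	 * return without unlocking anything.
	 */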
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}
	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		/* Values below 10 are reserved for other things */
		if (fixup > 10) {
			extern const unsigned int __memset_start[];
			extern const unsigned int __memset_end[];
			extern const unsigned int __csum_partial_copy_start[];
			extern const unsigned int __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
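			/*
			 * Faults inside memset() or csum_partial_copy()
			 * additionally pass the fault address and faulting
			 * pc to their fixup handlers in %i4/%i5.
			 */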
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);
	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;
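		/*
		 * Copying the missing top-level entry is enough: the
		 * lower-level tables it points at are shared with
		 * init_mm, so nothing below the pgd needs copying.
		 */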
		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(vma, address, flags)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}
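/*
 * The register-window handlers below fault in the user stack before the
 * trapped window operation is retried.  A window save area is sixteen
 * 32-bit words (0x40 bytes), so its last doubleword sits at sp + 0x38;
 * when the window straddles a page boundary, the page at sp + 0x38 must
 * be faulted in as well as the one at sp.
 */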
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}
void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}
void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}