/*
 *  arch/s390/mm/fault.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include "../kernel/entry.h"
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK	0x7ffff000
#define __SUBCODE_MASK		0x0200
#define __PF_RES_FIELD		0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK	-4096L
#define __SUBCODE_MASK		0x0600
#define __PF_RES_FIELD		0x8000000000000000ULL
#endif /* CONFIG_64BIT */

#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000

static unsigned long store_indication;

void fault_init(void)
{
	if (test_facility(2) && test_facility(75))
		store_indication = 0xc00;
}
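
/*
 * Note: with the two facilities above installed, two bits of the
 * translation-exception identification indicate whether the faulting
 * access was a fetch or a store. do_exception() tests
 * (trans_exc_code & store_indication) == 0x400 and treats the fault as
 * a write fault in that case; with store_indication left at zero the
 * test never matches, and only access == VM_WRITE forces
 * FAULT_FLAG_WRITE.
 */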

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;

		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}

/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(unsigned long trans_exc_code)
{
	/*
	 * The lowest two bits of the translation exception
	 * identification indicate which paging table was used.
	 */
	trans_exc_code &= 3;
	if (trans_exc_code == 2)
		/* Access via secondary space, set_fs setting decides */
		return current->thread.mm_segment.ar4;
	/*
	 * user_mode here is the global address-space-mode setting, not
	 * the user_mode(regs) macro, which is only expanded when it is
	 * followed by an argument list.
	 */
	if (user_mode == HOME_SPACE_MODE)
		/* User space if the access has been done via home space. */
		return trans_exc_code == 3;
	/*
	 * If the user space is not the home space the kernel runs in home
	 * space. Access via secondary space has already been covered,
	 * access via primary space or access register is from user space
	 * and access via home space is from the kernel.
	 */
	return trans_exc_code != 3;
}

static inline void report_user_fault(struct pt_regs *regs, long signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code 0x%X ",
	       regs->int_code);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
	printk(KERN_CONT "\n");
	printk(KERN_ALERT "failing address: %lX\n",
	       regs->int_parm_long & __FAIL_ADDR_MASK);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	struct siginfo si;

	report_user_fault(regs, SIGSEGV);
	si.si_signo = SIGSEGV;
	si.si_code = si_code;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGSEGV, &si, current);
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	unsigned long address;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
	if (fixup) {
		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	address = regs->int_parm_long & __FAIL_ADDR_MASK;
	if (!user_space_fault(regs->int_parm_long))
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " at virtual kernel address %p\n", (void *)address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " at virtual user address %p\n", (void *)address);

	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/*
	 * Low-address protection hit in kernel mode means
	 * NULL pointer write access in kernel mode.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct siginfo si;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRERR;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGBUS, &si, tsk);
}

static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (regs->psw.mask & PSW_MASK_PSTATE) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			return;
		}
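		/*
		 * Fall through: a bad access in kernel mode is handled
		 * like a fault without user context.
		 */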
	case VM_FAULT_BADCONTEXT:
		do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!(regs->psw.mask & PSW_MASK_PSTATE))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!(regs->psw.mask & PSW_MASK_PSTATE))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	int fault;

	if (notify_page_fault(regs))
		return 0;

	tsk = current;
	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
		goto out;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);
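
	/*
	 * If the fault occurred while running a KVM guest (TIF_SIE set),
	 * the faulting address is a guest address; translate it through
	 * the guest mapping (gmap) to the host user address that backs it
	 * before doing the regular vma lookup below.
	 */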
#ifdef CONFIG_PGSTE
	if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
		address = __gmap_fault(address,
				       (struct gmap *) S390_lowcore.gmap);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
#endif

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. handle_mm_fault() has already
			 * released mmap_sem in the retry case, so it must
			 * be re-taken before retrying.
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
	/*
	 * The instruction that caused the program check will
	 * be repeated. Don't signal single step via SIGTRAP.
	 */
	clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}
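
/*
 * do_exception() returns 0 once the fault has been resolved, or a
 * VM_FAULT_* code (including the private VM_FAULT_BAD* values defined
 * above) that the handlers below forward to do_fault_error().
 */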
void __kprobes do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int fault;

	trans_exc_code = regs->int_parm_long;
	/* Protection exception is suppressing, decrement psw address. */
	regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection. This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	fault = do_exception(regs, VM_WRITE);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}

void __kprobes do_dat_exception(struct pt_regs *regs)
{
	int access, fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}

#ifdef CONFIG_64BIT
void __kprobes do_asce_exception(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long;
	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
		goto no_context;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
	up_read(&mm->mmap_sem);

	if (vma) {
		update_mm(mm, current);
		return;
	}

	/* User mode accesses just cause a SIGSEGV */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		do_sigsegv(regs, SEGV_MAPERR);
		return;
	}

no_context:
	do_no_context(regs);
}
#endif
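
/*
 * __handle_fault lets callers outside the first-level fault handlers
 * (e.g. the uaccess fault fixup code) reuse do_exception(): it builds a
 * minimal pt_regs for the given user address. Note the "| 2" in
 * int_parm_long: it marks the access as done via the secondary space,
 * so user_space_fault() resolves it through the current set_fs()
 * setting.
 */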
int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
{
	struct pt_regs regs;
	int access, fault;

	regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
	if (!irqs_disabled())
		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
	regs.psw.addr = (unsigned long) __builtin_return_address(0);
	regs.psw.addr |= PSW_ADDR_AMODE;
	regs.int_code = pgm_int_code;
	regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
	access = write ? VM_WRITE : VM_READ;
	fault = do_exception(&regs, access);
	if (unlikely(fault)) {
		if (fault & VM_FAULT_OOM)
			return -EFAULT;
		else if (fault & VM_FAULT_SIGBUS)
			do_sigbus(&regs);
	}
	return fault ? -EFAULT : 0;
}

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}
__setup("nopfault", nopfault);

struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));
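
/*
 * The refbk above is the parameter block for DIAG 0x258, the pfault
 * handshake with the hypervisor: reffcode selects the function (0 to
 * establish pfault handling, 1 to cancel it), and refgaddr names the
 * lowcore location whose contents (here the pid of the current task)
 * are delivered back as the token of later pfault interrupts.
 */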

int pfault_init(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 0,
		.refdwlen = 5,
		.refversn = 2,
		.refgaddr = __LC_CURRENT_PID,
		.refselmk = 1ULL << 48,
		.refcmpmk = 1ULL << 48,
		.reserved = __PF_RES_FIELD };
	int rc;

	if (pfault_disable)
		return -1;
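	/*
	 * If the hypervisor does not support the diagnose, the
	 * instruction itself faults and the fixup at label 1 sets
	 * rc to 8 so that the caller sees a failure.
	 */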
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	return rc;
}

void pfault_fini(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 1,
		.refdwlen = 5,
		.refversn = 2,
	};

	if (pfault_disable)
		return;
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}
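
/*
 * Tasks sleeping on an outstanding pfault initial interrupt are kept on
 * pfault_list (linked through their thread_struct) under pfault_lock.
 * Each entry holds an extra task reference, taken in pfault_interrupt(),
 * so the task cannot go away before the completion interrupt or the CPU
 * hotplug notifier wakes it up and drops the reference again.
 */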
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

static void pfault_interrupt(unsigned int ext_int_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault
	 * initial/completion signal bit. VM stores this
	 * in the 'cpu address' field associated with the
	 * external interrupt.
	 */
	subcode = ext_int_code >> 16;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
	if (subcode & 0x0080) {
		/* Get the token (= pid of the affected task). */
		pid = sizeof(void *) == 4 ? param32 : param64;
		rcu_read_lock();
		tsk = find_task_by_pid_ns(pid, &init_pid_ns);
		if (tsk)
			get_task_struct(tsk);
		rcu_read_unlock();
		if (!tsk)
			return;
	} else {
		tsk = current;
	}
	spin_lock(&pfault_lock);
	if (subcode & 0x0080) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/*
			 * Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults.
			 */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			/* Drop the reference held by the pfault_list entry. */
			put_task_struct(tsk);
		} else {
			/*
			 * Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts.
			 */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
		/* Drop the reference taken by the pid lookup above. */
		put_task_struct(tsk);
	} else {
		/* signal bit not set -> a real page is missing. */
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		} else if (tsk->thread.pfault_wait == -1) {
			/*
			 * Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit.
			 */
			tsk->thread.pfault_wait = 0;
		} else {
			/*
			 * Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached.
			 */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		}
	}
	spin_unlock(&pfault_lock);
}
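
/*
 * When a CPU goes offline, completion interrupts for tasks that went to
 * sleep on that cpu may never be delivered; wake up everything still on
 * pfault_list so the tasks simply retake the fault through the regular
 * path if necessary.
 */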
static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
				       unsigned long action, void *hcpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	switch (action) {
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		spin_lock_irq(&pfault_lock);
		list_for_each_entry_safe(thread, next, &pfault_list, list) {
			thread->pfault_wait = 0;
			list_del(&thread->list);
			tsk = container_of(thread, struct task_struct, thread);
			wake_up_process(tsk);
			put_task_struct(tsk);
		}
		spin_unlock_irq(&pfault_lock);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_interrupt(0x2603, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	service_subclass_irq_register();
	hotcpu_notifier(pfault_cpu_notify, 0);
	return 0;

out_pfault:
	unregister_external_interrupt(0x2603, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */