// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995 Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000
#define VM_FAULT_SIGNAL		0x080000
#define VM_FAULT_PFAULT		0x100000

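/*
 * These VM_FAULT_* codes are s390-private additions. They are chosen,
 * presumably, so that they do not collide with the generic VM_FAULT_*
 * error bits that handle_mm_fault() returns, which lets do_exception()
 * pass both kinds back through a single vm_fault_t and lets
 * do_fault_error() dispatch on them in one switch.
 */
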
enum fault_type {
        KERNEL_FAULT,
        USER_FAULT,
        VDSO_FAULT,
        GMAP_FAULT,
};

static unsigned long store_indication __read_mostly;

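/*
 * A sketch of how store_indication is used, assuming facility 75 is the
 * access-exception fetch/store-indication facility: when installed, two
 * TEID bits (masked by 0xc00) report whether the faulting access was a
 * fetch or a store. do_exception() then tests
 * (trans_exc_code & store_indication) == 0x400 to decide whether to set
 * FAULT_FLAG_WRITE; without the facility store_indication stays 0 and
 * the test never matches.
 */
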
static int __init fault_init(void)
{
        if (test_facility(75))
                store_indication = 0xc00;
        return 0;
}
early_initcall(fault_init);

static inline int notify_page_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
                preempt_enable();
        }
        return ret;
}

/*
 * Find out which address space caused the exception.
 * Access register mode is impossible, ignore space == 3.
 */
static inline enum fault_type get_fault_type(struct pt_regs *regs)
{
        unsigned long trans_exc_code;

        trans_exc_code = regs->int_parm_long & 3;
        if (likely(trans_exc_code == 0)) {
                /* primary space exception */
                if (IS_ENABLED(CONFIG_PGSTE) &&
                    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
                        return GMAP_FAULT;
                if (current->thread.mm_segment == USER_DS)
                        return USER_FAULT;
                return KERNEL_FAULT;
        }
        if (trans_exc_code == 2) {
                /* secondary space exception */
                if (current->thread.mm_segment & 1) {
                        if (current->thread.mm_segment == USER_DS_SACF)
                                return USER_FAULT;
                        return KERNEL_FAULT;
                }
                return VDSO_FAULT;
        }
        /* home space exception -> access via kernel ASCE */
        return KERNEL_FAULT;
}

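/*
 * For reference, the mapping implied above and in dump_fault_info()
 * below - the two low TEID bits select the address space that was in
 * use when the fault happened:
 *
 *   trans_exc_code & 3 == 0 -> primary space (user, kernel or gmap)
 *   trans_exc_code & 3 == 1 -> access register mode (ignored, see above)
 *   trans_exc_code & 3 == 2 -> secondary space (user/kernel under SACF,
 *                              or the vdso)
 *   trans_exc_code & 3 == 3 -> home space (kernel ASCE)
 */
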
static int bad_address(void *p)
{
        unsigned long dummy;

        return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long asce, unsigned long address)
{
        unsigned long *table = __va(asce & _ASCE_ORIGIN);

        pr_alert("AS:%016lx ", asce);
        switch (asce & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
                if (bad_address(table))
                        goto bad;
                pr_cont("R1:%016lx ", *table);
                if (*table & _REGION_ENTRY_INVALID)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION2:
                table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
                if (bad_address(table))
                        goto bad;
                pr_cont("R2:%016lx ", *table);
                if (*table & _REGION_ENTRY_INVALID)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION3:
                table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
                if (bad_address(table))
                        goto bad;
                pr_cont("R3:%016lx ", *table);
                if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_SEGMENT:
                table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
                if (bad_address(table))
                        goto bad;
                pr_cont("S:%016lx ", *table);
                if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
                        goto out;
                table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        }
        table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
        if (bad_address(table))
                goto bad;
        pr_cont("P:%016lx ", *table);
out:
        pr_cont("\n");
        return;
bad:
        pr_cont("BAD\n");
}

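/*
 * Illustration of the output (entry values made up): for a three-level,
 * region-third ASCE the walk prints
 *
 *   AS:0000000012345007 R3:0000000012346007 S:0000000012347800 P:0000000012348400
 *
 * one field per table level that was traversed; "BAD" ends the line if
 * a table entry could not be read.
 */
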
static void dump_fault_info(struct pt_regs *regs)
{
        unsigned long asce;

        pr_alert("Failing address: %016lx TEID: %016lx\n",
                 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
        pr_alert("Fault in ");
        switch (regs->int_parm_long & 3) {
        case 3:
                pr_cont("home space ");
                break;
        case 2:
                pr_cont("secondary space ");
                break;
        case 1:
                pr_cont("access register ");
                break;
        case 0:
                pr_cont("primary space ");
                break;
        }
        pr_cont("mode while using ");
        switch (get_fault_type(regs)) {
        case USER_FAULT:
                asce = S390_lowcore.user_asce;
                pr_cont("user ");
                break;
        case VDSO_FAULT:
                asce = S390_lowcore.vdso_asce;
                pr_cont("vdso ");
                break;
        case GMAP_FAULT:
                asce = ((struct gmap *) S390_lowcore.gmap)->asce;
                pr_cont("gmap ");
                break;
        case KERNEL_FAULT:
                asce = S390_lowcore.kernel_asce;
                pr_cont("kernel ");
                break;
        }
        pr_cont("ASCE.\n");
        dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
        if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
                return;
        if (!unhandled_signal(current, signr))
                return;
        if (!printk_ratelimit())
                return;
        printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
               regs->int_code & 0xffff, regs->int_code >> 17);
        print_vma_addr(KERN_CONT "in ", regs->psw.addr);
        printk(KERN_CONT "\n");
        if (is_mm_fault)
                dump_fault_info(regs);
        show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
        report_user_fault(regs, SIGSEGV, 1);
        force_sig_fault(SIGSEGV, si_code,
                        (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK),
                        current);
}

const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
        const struct exception_table_entry *fixup;

        fixup = search_extable(__start_dma_ex_table,
                               __stop_dma_ex_table - __start_dma_ex_table,
                               addr);
        if (!fixup)
                fixup = search_exception_tables(addr);
        return fixup;
}

static noinline void do_no_context(struct pt_regs *regs)
{
        const struct exception_table_entry *fixup;

        /* Are we prepared to handle this kernel fault? */
        fixup = s390_search_extables(regs->psw.addr);
        if (fixup) {
                regs->psw.addr = extable_fixup(fixup);
                return;
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        if (get_fault_type(regs) == KERNEL_FAULT)
                printk(KERN_ALERT "Unable to handle kernel pointer dereference"
                       " in virtual kernel address space\n");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request"
                       " in virtual user address space\n");
        dump_fault_info(regs);
        die(regs, "Oops");
        do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
        /* Low-address protection hit in kernel mode means
           NULL pointer write access in kernel mode. */
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                /* Low-address protection hit in user mode 'cannot happen'. */
                die(regs, "Low-address protection");
                do_exit(SIGKILL);
        }

        do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        force_sig_fault(SIGBUS, BUS_ADRERR,
                        (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK),
                        current);
}

static noinline int signal_return(struct pt_regs *regs)
{
        u16 instruction;
        int rc;

        rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
        if (rc)
                return rc;
        if (instruction == 0x0a77) {
                set_pt_regs_flag(regs, PIF_SYSCALL);
                regs->int_code = 0x00040077;
                return 0;
        } else if (instruction == 0x0aad) {
                set_pt_regs_flag(regs, PIF_SYSCALL);
                regs->int_code = 0x000400ad;
                return 0;
        }
        return -EACCES;
}

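/*
 * Background on the two magic halfwords: 0x0a is the s390 SVC opcode,
 * so 0x0a77 is "svc 0x77" and 0x0aad is "svc 0xad", matching
 * __NR_sigreturn (119 == 0x77) and __NR_rt_sigreturn (173 == 0xad).
 * These are the instructions of the signal return trampolines; an
 * execute fault on one of them is converted into the corresponding
 * system call (PIF_SYSCALL plus a faked int_code) instead of being
 * reported as a SIGSEGV.
 */
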
static noinline void do_fault_error(struct pt_regs *regs, int access,
                                    vm_fault_t fault)
{
        int si_code;

        switch (fault) {
        case VM_FAULT_BADACCESS:
                if (access == VM_EXEC && signal_return(regs) == 0)
                        break;
        case VM_FAULT_BADMAP:
                /* Bad memory access. Check if it is kernel or user space. */
                if (user_mode(regs)) {
                        /* User mode accesses just cause a SIGSEGV */
                        si_code = (fault == VM_FAULT_BADMAP) ?
                                SEGV_MAPERR : SEGV_ACCERR;
                        do_sigsegv(regs, si_code);
                        break;
                }
        case VM_FAULT_BADCONTEXT:
        case VM_FAULT_PFAULT:
                do_no_context(regs);
                break;
        case VM_FAULT_SIGNAL:
                if (!user_mode(regs))
                        do_no_context(regs);
                break;
        default: /* fault & VM_FAULT_ERROR */
                if (fault & VM_FAULT_OOM) {
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                pagefault_out_of_memory();
                } else if (fault & VM_FAULT_SIGSEGV) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                do_sigsegv(regs, SEGV_MAPERR);
                } else if (fault & VM_FAULT_SIGBUS) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                do_sigbus(regs);
                } else
                        BUG();
                break;
        }
}

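/*
 * Note that the first cases above fall through deliberately: a
 * VM_FAULT_BADACCESS that is not a successful signal-return conversion
 * is handled like VM_FAULT_BADMAP, and either one, when hit in kernel
 * mode, drops through to the do_no_context() path.
 */
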
/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
        struct gmap *gmap;
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        enum fault_type type;
        unsigned long trans_exc_code;
        unsigned long address;
        unsigned int flags;
        vm_fault_t fault;

        tsk = current;
        /*
         * The instruction that caused the program check has
         * been nullified. Don't signal single step via SIGTRAP.
         */
        clear_pt_regs_flag(regs, PIF_PER_TRAP);

        if (notify_page_fault(regs))
                return 0;

        mm = tsk->mm;
        trans_exc_code = regs->int_parm_long;

        /*
         * Verify that the fault happened in user space, that
         * we are not in an interrupt and that there is a
         * user context.
         */
        fault = VM_FAULT_BADCONTEXT;
        type = get_fault_type(regs);
        switch (type) {
        case KERNEL_FAULT:
                goto out;
        case VDSO_FAULT:
                fault = VM_FAULT_BADMAP;
                goto out;
        case USER_FAULT:
        case GMAP_FAULT:
                if (faulthandler_disabled() || !mm)
                        goto out;
                break;
        }

        address = trans_exc_code & __FAIL_ADDR_MASK;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
                flags |= FAULT_FLAG_WRITE;
        down_read(&mm->mmap_sem);

        gmap = NULL;
        if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
                gmap = (struct gmap *) S390_lowcore.gmap;
                current->thread.gmap_addr = address;
                current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
                current->thread.gmap_int_code = regs->int_code & 0xffff;
                address = __gmap_translate(gmap, address);
                if (address == -EFAULT) {
                        fault = VM_FAULT_BADMAP;
                        goto out_up;
                }
                if (gmap->pfault_enabled)
                        flags |= FAULT_FLAG_RETRY_NOWAIT;
        }

retry:
        fault = VM_FAULT_BADMAP;
        vma = find_vma(mm, address);
        if (!vma)
                goto out_up;

        if (unlikely(vma->vm_start > address)) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out_up;
                if (expand_stack(vma, address))
                        goto out_up;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
        fault = VM_FAULT_BADACCESS;
        if (unlikely(!(vma->vm_flags & access)))
                goto out_up;

        if (is_vm_hugetlb_page(vma))
                address &= HPAGE_MASK;
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags);
        /* No reason to continue if interrupted by SIGKILL. */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
                fault = VM_FAULT_SIGNAL;
                if (flags & FAULT_FLAG_RETRY_NOWAIT)
                        goto out_up;
                goto out;
        }
        if (unlikely(fault & VM_FAULT_ERROR))
                goto out_up;

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
                        if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
                            (flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                /* FAULT_FLAG_RETRY_NOWAIT has been set,
                                 * mmap_sem has not been released */
                                current->thread.gmap_pfault = 1;
                                fault = VM_FAULT_PFAULT;
                                goto out_up;
                        }
                        /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation. */
                        flags &= ~(FAULT_FLAG_ALLOW_RETRY |
                                   FAULT_FLAG_RETRY_NOWAIT);
                        flags |= FAULT_FLAG_TRIED;
                        down_read(&mm->mmap_sem);
                        goto retry;
                }
        }
        if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
                address = __gmap_link(gmap, current->thread.gmap_addr,
                                      address);
                if (address == -EFAULT) {
                        fault = VM_FAULT_BADMAP;
                        goto out_up;
                }
                if (address == -ENOMEM) {
                        fault = VM_FAULT_OOM;
                        goto out_up;
                }
        }
        fault = 0;
out_up:
        up_read(&mm->mmap_sem);
out:
        return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
        unsigned long trans_exc_code;
        int access;
        vm_fault_t fault;

        trans_exc_code = regs->int_parm_long;
        /*
         * Protection exceptions are suppressing, decrement psw address.
         * The exception to this rule are aborted transactions, for these
         * the PSW already points to the correct location.
         */
        if (!(regs->int_code & 0x200))
                regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
        /*
         * Check for low-address protection. This needs to be treated
         * as a special case because the translation exception code
         * field is not guaranteed to contain valid data in this case.
         */
        if (unlikely(!(trans_exc_code & 4))) {
                do_low_address(regs);
                return;
        }
        if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
                regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
                                      (regs->psw.addr & PAGE_MASK);
                access = VM_EXEC;
                fault = VM_FAULT_BADACCESS;
        } else {
                access = VM_WRITE;
                fault = do_exception(regs, access);
        }
        if (unlikely(fault))
                do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

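/*
 * A note on the MACHINE_HAS_NX branch above: when instruction-execution
 * protection is available and the TEID bit masked by 0x80 is set, the
 * protection exception came from an instruction fetch on a
 * non-executable page. The failing "address" is then rebuilt from the
 * PSW and reported as a VM_EXEC access error, which do_fault_error()
 * may still convert back into a signal-return system call via
 * signal_return().
 */
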
void do_dat_exception(struct pt_regs *regs)
{
        int access;
        vm_fault_t fault;

        access = VM_READ | VM_EXEC | VM_WRITE;
        fault = do_exception(regs, access);
        if (unlikely(fault))
                do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
        pfault_disable = 1;
        return 1;
}
__setup("nopfault", nopfault);

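/*
 * Usage: booting with "nopfault" on the kernel command line sets
 * pfault_disable, so pfault_init() below returns -1 and
 * pfault_irq_init() backs out without enabling the pseudo-page-fault
 * handshake.
 */
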
struct pfault_refbk {
        u16 refdiagc;
        u16 reffcode;
        u16 refdwlen;
        u16 refversn;
        u64 refgaddr;
        u64 refselmk;
        u64 refcmpmk;
        u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
        .refdiagc = 0x258,
        .reffcode = 0,
        .refdwlen = 5,
        .refversn = 2,
        .refgaddr = __LC_LPP,
        .refselmk = 1ULL << 48,
        .refcmpmk = 1ULL << 48,
        .reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
        int rc;

        if (pfault_disable)
                return -1;
        diag_stat_inc(DIAG_STAT_X258);
        asm volatile(
                "       diag    %1,%0,0x258\n"
                "0:     j       2f\n"
                "1:     la      %0,8\n"
                "2:\n"
                EX_TABLE(0b,1b)
                : "=d" (rc)
                : "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
        return rc;
}

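/*
 * Rough shape of the hypercall above, as this file uses it: diag 0x258
 * with reffcode 0 passes the host a request block naming the token
 * location (__LC_LPP, whose low bits hold the pid) to be presented with
 * each pfault interrupt; reffcode 1 (see pfault_fini()) cancels the
 * arrangement. rc == 0 means the host enabled pseudo page faults; a
 * program check from the diag (e.g. a hypervisor without 0x258 support)
 * lands in the EX_TABLE fixup and yields rc == 8.
 */
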
static struct pfault_refbk pfault_fini_refbk = {
        .refdiagc = 0x258,
        .reffcode = 1,
        .refdwlen = 5,
        .refversn = 2,
};

void pfault_fini(void)
{
        if (pfault_disable)
                return;
        diag_stat_inc(DIAG_STAT_X258);
        asm volatile(
                "       diag    %0,0,0x258\n"
                "0:     nopr    %%r7\n"
                EX_TABLE(0b,0b)
                : : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as guest, runs a user
 * space process and the user space process accesses a page that the host has
 * paged out we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt then we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happen within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule(). It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */

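/*
 * A worked example of the races handled below, with pfault_wait as the
 * per-task state variable (cpu numbers purely illustrative):
 *
 *   Normal order:
 *     cpu 0: initial interrupt    -> pfault_wait = 1, task blocks
 *     cpu 1: completion interrupt -> pfault_wait = 0, task is woken
 *
 *   Reversed order (possible with virtual cpus):
 *     cpu 1: completion interrupt -> task still TASK_RUNNING,
 *                                    pfault_wait = -1
 *     cpu 0: initial interrupt    -> sees -1, resets it to 0 and does
 *                                    not block: the page is already there
 */
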
static void pfault_interrupt(struct ext_code ext_code,
                             unsigned int param32, unsigned long param64)
{
        struct task_struct *tsk;
        __u16 subcode;
        pid_t pid;

        /*
         * Get the external interruption subcode & pfault initial/completion
         * signal bit. VM stores this in the 'cpu address' field associated
         * with the external interrupt.
         */
        subcode = ext_code.subcode;
        if ((subcode & 0xff00) != __SUBCODE_MASK)
                return;
        inc_irq_stat(IRQEXT_PFL);
        /* Get the token (= pid of the affected task). */
        pid = param64 & LPP_PID_MASK;
        rcu_read_lock();
        tsk = find_task_by_pid_ns(pid, &init_pid_ns);
        if (tsk)
                get_task_struct(tsk);
        rcu_read_unlock();
        if (!tsk)
                return;
        spin_lock(&pfault_lock);
        if (subcode & PF_COMPLETE) {
                /* signal bit is set -> a page has been swapped in by VM */
                if (tsk->thread.pfault_wait == 1) {
                        /* Initial interrupt was faster than the completion
                         * interrupt. pfault_wait is valid. Set pfault_wait
                         * back to zero and wake up the process. This can
                         * safely be done because the task is still sleeping
                         * and can't produce new pfaults. */
                        tsk->thread.pfault_wait = 0;
                        list_del(&tsk->thread.list);
                        wake_up_process(tsk);
                        put_task_struct(tsk);
                } else {
                        /* Completion interrupt was faster than initial
                         * interrupt. Set pfault_wait to -1 so the initial
                         * interrupt doesn't put the task to sleep.
                         * If the task is not running, ignore the completion
                         * interrupt since it must be a leftover of a PFAULT
                         * CANCEL operation which didn't remove all pending
                         * completion interrupts. */
                        if (tsk->state == TASK_RUNNING)
                                tsk->thread.pfault_wait = -1;
                }
        } else {
                /* signal bit not set -> a real page is missing. */
                if (WARN_ON_ONCE(tsk != current))
                        goto out;
                if (tsk->thread.pfault_wait == 1) {
                        /* Already on the list with a reference: put to sleep */
                        goto block;
                } else if (tsk->thread.pfault_wait == -1) {
                        /* Completion interrupt was faster than the initial
                         * interrupt (pfault_wait == -1). Set pfault_wait
                         * back to zero and exit. */
                        tsk->thread.pfault_wait = 0;
                } else {
                        /* Initial interrupt arrived before completion
                         * interrupt. Let the task sleep.
                         * An extra task reference is needed since a different
                         * cpu may set the task state to TASK_RUNNING again
                         * before the scheduler is reached. */
                        get_task_struct(tsk);
                        tsk->thread.pfault_wait = 1;
                        list_add(&tsk->thread.list, &pfault_list);
block:
                        /* Since this must be a userspace fault, there
                         * is no kernel task state to trample. Rely on the
                         * return to userspace schedule() to block. */
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                        set_tsk_need_resched(tsk);
                        set_preempt_need_resched();
                }
        }
out:
        spin_unlock(&pfault_lock);
        put_task_struct(tsk);
}

static int pfault_cpu_dead(unsigned int cpu)
{
        struct thread_struct *thread, *next;
        struct task_struct *tsk;

        spin_lock_irq(&pfault_lock);
        list_for_each_entry_safe(thread, next, &pfault_list, list) {
                thread->pfault_wait = 0;
                list_del(&thread->list);
                tsk = container_of(thread, struct task_struct, thread);
                wake_up_process(tsk);
                put_task_struct(tsk);
        }
        spin_unlock_irq(&pfault_lock);
        return 0;
}

static int __init pfault_irq_init(void)
{
        int rc;

        rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
        if (rc)
                goto out_extint;
        rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
        if (rc)
                goto out_pfault;
        irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
        cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
                                  NULL, pfault_cpu_dead);
        return 0;

out_pfault:
        unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
        pfault_disable = 1;
        return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */