/*
 *  Copyright IBM Corp. 1999
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995 Linus Torvalds
 */
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK	0x7ffff000
#define __SUBCODE_MASK		0x0200
#define __PF_RES_FIELD		0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK	-4096L
#define __SUBCODE_MASK		0x0600
#define __PF_RES_FIELD		0x8000000000000000ULL
#endif /* CONFIG_64BIT */
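/*
 * Arch-private fault codes returned by do_exception(). They live in
 * bits above the generic VM_FAULT_* flags from <linux/mm.h>, so they
 * can travel through the same int that carries handle_mm_fault()
 * results and be dispatched in do_fault_error().
 */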
#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000
#define VM_FAULT_SIGNAL		0x080000
#define VM_FAULT_PFAULT		0x100000
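/*
 * TEID mask for the fetch/store-indication bits, set in fault_init()
 * below when test_facility(75) reports the facility is installed
 * (naming it the fetch/store-indication facility is an assumption
 * here; the code relies only on the bit layout). Under this mask a
 * TEID value of 0x400 marks the failing access as a store, letting
 * do_exception() set FAULT_FLAG_WRITE on the first attempt.
 */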
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);
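/*
 * Give registered kprobes a chance to handle a fault raised while a
 * probed instruction executes in kernel mode; returns nonzero if a
 * kprobe fault handler consumed the fault.
 */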
static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}
/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;

		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console. Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke. Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}
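/*
 * The two low bits of the TEID (kept in regs->int_parm_long) name the
 * address space the CPU used for the failing translation:
 * 0 = primary, 1 = access register, 2 = secondary, 3 = home --
 * the same encoding dump_fault_info() switches on below.
 */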
/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	/*
	 * The lowest two bits of the translation exception
	 * identification indicate which paging table was used.
	 */
	trans_exc_code = regs->int_parm_long & 3;
	if (trans_exc_code == 3) /* home space -> kernel */
		return 0;
	if (trans_exc_code == 2) /* secondary space -> set_fs */
		return current->thread.mm_segment.ar4;
	if (current->flags & PF_VCPU)
		return 1;
	return 0;
}
static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}
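/*
 * Walk the page table for the given fault address, printing one entry
 * per translation level (two variants: 64-bit with region tables,
 * 31-bit with segment and page tables only). In the 64-bit version
 * the switch falls through on purpose, entering at the level named by
 * the ASCE type and descending until an entry is invalid or maps a
 * large page.
 */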
#ifdef CONFIG_64BIT

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & PAGE_MASK);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table = table + ((address >> 53) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table = table + ((address >> 42) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table = table + ((address >> 31) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table = table + ((address >> 20) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table = table + ((address >> 12) & 0xff);
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}
#else /* CONFIG_64BIT */

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & PAGE_MASK);

	pr_alert("AS:%08lx ", asce);
	table = table + ((address >> 20) & 0x7ff);
	if (bad_address(table))
		goto bad;
	pr_cont("S:%08lx ", *table);
	if (*table & _SEGMENT_ENTRY_INVALID)
		goto out;
	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	table = table + ((address >> 12) & 0xff);
	if (bad_address(table))
		goto bad;
	pr_cont("P:%08lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

#endif /* CONFIG_64BIT */
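/*
 * Report which address-space mode and which ASCE (kernel, gmap, or
 * user) the failing translation used, then dump the page-table chain
 * for the faulting address.
 */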
static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	if (!user_space_fault(regs)) {
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
	}
#ifdef CONFIG_PGSTE
	else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
		struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

		asce = gmap->asce;
		pr_cont("gmap ");
	}
#endif
	else {
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}
static inline void report_user_fault(struct pt_regs *regs, long signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code 0x%X ",
	       regs->int_code);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
	printk(KERN_CONT "\n");
	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	dump_fault_info(regs);
}
/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	struct siginfo si;

	report_user_fault(regs, SIGSEGV);
	si.si_signo = SIGSEGV;
	si.si_code = si_code;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGSEGV, &si, current);
}
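/*
 * A fault in kernel context is fatal unless the faulting instruction
 * has an entry in the exception table (as the uaccess helpers do), in
 * which case the PSW is redirected to the fixup and execution
 * continues.
 */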
static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	unsigned long address;

	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	address = regs->int_parm_long & __FAIL_ADDR_MASK;
	if (!user_space_fault(regs))
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}
static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die (regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}
static noinline void do_sigbus(struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct siginfo si;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRERR;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGBUS, &si, tsk);
}
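/*
 * Translate the fault code returned by do_exception() into an action:
 * SIGSEGV for bad user-space accesses, the kernel fixup/oops path for
 * kernel-context faults, and OOM or SIGBUS handling for errors
 * reported by handle_mm_fault().
 */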
static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			return;
		}
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}
/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
#ifdef CONFIG_PGSTE
	struct gmap *gmap;
#endif
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	int fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (notify_page_fault(regs))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
		goto out;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

#ifdef CONFIG_PGSTE
	gmap = (current->flags & PF_VCPU) ?
		(struct gmap *) S390_lowcore.gmap : NULL;
	if (gmap) {
		current->thread.gmap_addr = address;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}
#endif

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	/* No reason to continue if interrupted by SIGKILL. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		fault = VM_FAULT_SIGNAL;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
#ifdef CONFIG_PGSTE
			if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
#endif
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
				   FAULT_FLAG_RETRY_NOWAIT);
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
#ifdef CONFIG_PGSTE
	if (gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
#endif
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}
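/*
 * do_exception() returns 0 once the fault has been resolved and one
 * of the VM_FAULT_* codes otherwise, so both entry points below only
 * call do_fault_error() in the unlikely failure case.
 */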
void __kprobes do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection. This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	fault = do_exception(regs, VM_WRITE);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
void __kprobes do_dat_exception(struct pt_regs *regs)
{
	int access, fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}
__setup("nopfault", nopfault);
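/*
 * Parameter block for DIAGNOSE 0x258, the pfault handshake with the
 * hypervisor. The field list below is reconstructed to match the
 * initializers in pfault_init()/pfault_fini(); refgaddr points the
 * hypervisor at __LC_CURRENT_PID so the pid serves as pfault token.
 */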
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));
int pfault_init(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 0,
		.refdwlen = 5,
		.refversn = 2,
		.refgaddr = __LC_CURRENT_PID,
		.refselmk = 1ULL << 48,
		.refcmpmk = 1ULL << 48,
		.reserved = __PF_RES_FIELD };
	int rc;

	if (pfault_disable)
		return -1;
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	return rc;
}
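/*
 * pfault_fini() issues the same diagnose with the cancel function
 * code. A cancel can leave stale completion interrupts behind; these
 * are the leftovers pfault_interrupt() below deliberately ignores
 * when the target task is not running.
 */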
void pfault_fini(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 1,
		.refdwlen = 5,
		.refversn = 2,
	};

	if (pfault_disable)
		return;
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);
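/*
 * pfault_list holds the threads currently sleeping on an initial
 * pfault interrupt, protected by pfault_lock. thread.pfault_wait
 * encodes the handshake state: 0 = no pfault pending, 1 = task is on
 * the list waiting for the completion interrupt, -1 = the completion
 * interrupt overtook the initial one and the task must not be put to
 * sleep.
 */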
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault
	 * initial/completion signal bit. VM stores this
	 * in the 'cpu address' field associated with the
	 * external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = sizeof(void *) == 4 ? param32 : param64;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & 0x0080) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}
static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
			     void *hcpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		spin_lock_irq(&pfault_lock);
		list_for_each_entry_safe(thread, next, &pfault_list, list) {
			thread->pfault_wait = 0;
			list_del(&thread->list);
			tsk = container_of(thread, struct task_struct, thread);
			wake_up_process(tsk);
			put_task_struct(tsk);
		}
		spin_unlock_irq(&pfault_lock);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
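/*
 * Registration order matters here: the external-interrupt handler
 * must be in place before DIAG 0x258 enables pfault, and both steps
 * are unwound (with pfault_disable set) if the hypervisor does not
 * support pseudo page faults.
 */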
static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	hotcpu_notifier(pfault_cpu_notify, 0);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);
#endif /* CONFIG_PFAULT */