/*
 *  arch/s390/mm/fault.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/s390_ext.h>
#include <asm/mmu_context.h>
#include "../kernel/entry.h"
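
/*
 * Addressing-mode dependent constants: the failing-address mask applied
 * to trans_exc_code, the mask used when searching the exception tables
 * for psw.addr, the expected pfault interrupt subcode, and the reserved
 * field of the pfault parameter block.
 */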
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK	0x7ffff000
#define __FIXUP_MASK		0x7fffffff
#define __SUBCODE_MASK		0x0200
#define __PF_RES_FIELD		0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK	-4096L
#define __FIXUP_MASK		~0L
#define __SUBCODE_MASK		0x0600
#define __PF_RES_FIELD		0x8000000000000000ULL
#endif /* CONFIG_64BIT */
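
/*
 * sysctl_userprocess_debug enables logging of user process faults
 * in do_sigsegv() below.
 */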
#ifdef CONFIG_SYSCTL
extern int sysctl_userprocess_debug;
#endif
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, long err)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, long err)
{
	return 0;
}
#endif
/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;
		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}
/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space, 1 for user space and
 * 2 for code execution in user space with noexec=on.
 */
static inline int check_space(struct task_struct *tsk)
{
	/*
	 * The lowest two bits of S390_lowcore.trans_exc_code
	 * indicate which paging table was used.
	 */
	int desc = S390_lowcore.trans_exc_code & 3;

	if (desc == 3)	/* Home Segment Table Descriptor */
		return switch_amode == 0;
	if (desc == 2)	/* Secondary Segment Table Descriptor */
		return tsk->thread.mm_segment.ar4;
#ifdef CONFIG_S390_SWITCH_AMODE
	if (unlikely(desc == 1)) { /* STD determined via access register */
		/* %a0 always indicates primary space. */
		if (S390_lowcore.exc_access_id != 0) {
			save_access_regs(tsk->thread.acrs);
			/*
			 * An alet of 0 indicates primary space.
			 * An alet of 1 indicates secondary space.
			 * Any other alet values generate an
			 * alen-translation exception.
			 */
			if (tsk->thread.acrs[S390_lowcore.exc_access_id])
				return tsk->thread.mm_segment.ar4;
		}
	}
#endif
	/* Primary Segment Table Descriptor */
	return switch_amode << s390_noexec;
}
/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
		       int si_code, unsigned long address)
{
	struct siginfo si;

#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
#if defined(CONFIG_SYSCTL)
	if (sysctl_userprocess_debug)
#endif
	{
		printk("User process fault: interruption code 0x%lX\n",
		       error_code);
		printk("failing address: %lX\n", address);
		show_regs(regs);
	}
#endif
	si.si_signo = SIGSEGV;
	si.si_code = si_code;
	si.si_addr = (void __user *) address;
	force_sig_info(SIGSEGV, &si, current);
}
static void do_no_context(struct pt_regs *regs, unsigned long error_code,
			  unsigned long address)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
	if (fixup) {
		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (check_space(current) == 0)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " at virtual kernel address %p\n", (void *)address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " at virtual user address %p\n", (void *)address);

	die("Oops", regs, error_code);
	do_exit(SIGKILL);
}
static void do_low_address(struct pt_regs *regs, unsigned long error_code)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die ("Low-address protection", regs, error_code);
		do_exit(SIGKILL);
	}

	do_no_context(regs, error_code, 0);
}
static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.prot_addr = address;
	tsk->thread.trap_no = error_code;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!(regs->psw.mask & PSW_MASK_PSTATE))
		do_no_context(regs, error_code, address);
}
#ifdef CONFIG_S390_EXEC_PROTECT
static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
			 unsigned long address, unsigned long error_code)
{
	u16 instruction;
	int rc;
#ifdef CONFIG_COMPAT
	int compat;
#endif

	pagefault_disable();
	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	pagefault_enable();
	if (rc)
		return -EFAULT;

	up_read(&mm->mmap_sem);
	clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
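	/*
	 * 0x0a77 is "svc 119" (sigreturn) and 0x0aad is "svc 173"
	 * (rt_sigreturn); only these two opcodes are dispatched here,
	 * anything else at the faulting location gets a SIGSEGV.
	 */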
#ifdef CONFIG_COMPAT
	compat = is_compat_task();
	if (compat && instruction == 0x0a77)
		sys32_sigreturn();
	else if (compat && instruction == 0x0aad)
		sys32_rt_sigreturn();
	else
#endif
	if (instruction == 0x0a77)
		sys_sigreturn();
	else if (instruction == 0x0aad)
		sys_rt_sigreturn();
	else {
		current->thread.prot_addr = address;
		current->thread.trap_no = error_code;
		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
	}
	return 0;
}
#endif /* CONFIG_S390_EXEC_PROTECT */
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline void
do_exception(struct pt_regs *regs, unsigned long error_code, int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int space;
	int si_code;
	int fault;

	if (notify_page_fault(regs, error_code))
		return;

	tsk = current;
	mm = tsk->mm;

	/* get the failing address and the affected space */
	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
	space = check_space(tsk);

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	if (unlikely(space == 0 || in_atomic() || !mm))
		goto no_context;

	/*
	 * When we get here, the fault happened in the current
	 * task's user address space, so we can switch on the
	 * interrupts again and then search the VMAs
	 */
	local_irq_enable();
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
	down_read(&mm->mmap_sem);

	si_code = SEGV_MAPERR;
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

#ifdef CONFIG_S390_EXEC_PROTECT
	if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
		if (!signal_return(mm, regs, address, error_code))
			/*
			 * signal_return() has done an up_read(&mm->mmap_sem)
			 * if it returns 0.
			 */
			return;
#endif

	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (!write) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM) {
			up_read(&mm->mmap_sem);
			pagefault_out_of_memory();
			return;
		} else if (fault & VM_FAULT_SIGBUS) {
			do_sigbus(regs, error_code, address);
			return;
		}
		BUG();
	}
	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
			      regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
			      regs, address);
	}
	up_read(&mm->mmap_sem);
	/*
	 * The instruction that caused the program check will
	 * be repeated. Don't signal single step via SIGTRAP.
	 */
	clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

	/* User mode accesses just cause a SIGSEGV */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		tsk->thread.prot_addr = address;
		tsk->thread.trap_no = error_code;
		do_sigsegv(regs, error_code, si_code, address);
		return;
	}

no_context:
	do_no_context(regs, error_code, address);
}
void __kprobes do_protection_exception(struct pt_regs *regs,
				       long error_code)
{
	/* Protection exception is suppressing, decrement psw address. */
	regs->psw.addr -= (error_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
		do_low_address(regs, error_code);
		return;
	}
	do_exception(regs, 4, 1);
}
void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
{
	do_exception(regs, error_code & 0xff, 0);
}
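
/*
 * Handle address space control element (asce) exceptions (64 bit only):
 * if a vma exists for the faulting address the mm context is reloaded
 * via update_mm(), otherwise the fault is handled like any other bad
 * access.
 */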
#ifdef CONFIG_64BIT
void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int space;

	mm = current->mm;
	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
	space = check_space(current);

	if (unlikely(space == 0 || in_atomic() || !mm))
		goto no_context;

	local_irq_enable();

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	up_read(&mm->mmap_sem);

	if (vma) {
		update_mm(mm, current);
		return;
	}

	/* User mode accesses just cause a SIGSEGV */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		current->thread.prot_addr = address;
		current->thread.trap_no = error_code;
		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
		return;
	}

no_context:
	do_no_context(regs, error_code, address);
}
#endif
#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
 */
static ext_int_info_t ext_int_pfault;
static int pfault_disable = 0;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);
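
/*
 * Parameter block passed to the diagnose 0x258 page-fault handshaking
 * service by pfault_init() and pfault_fini() below.
 */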
typedef struct {
	__u16 refdiagc;
	__u16 reffcode;
	__u16 refdwlen;
	__u16 refversn;
	__u64 refgaddr;
	__u64 refselmk;
	__u64 refcmpmk;
	__u64 reserved;
} __attribute__ ((packed, aligned(8))) pfault_refbk_t;
int pfault_init(void)
{
	pfault_refbk_t refbk =
		{ 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48,
		  __PF_RES_FIELD };
	int rc;

	if (!MACHINE_IS_VM || pfault_disable)
		return -1;
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	__ctl_set_bit(0, 9);
	return rc;
}
void pfault_fini(void)
{
	pfault_refbk_t refbk =
		{ 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };

	if (!MACHINE_IS_VM || pfault_disable)
		return;
	__ctl_clear_bit(0, 9);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}
static void pfault_interrupt(__u16 error_code)
{
	struct task_struct *tsk;
	__u16 subcode;

	/*
	 * Get the external interruption subcode & pfault
	 * initial/completion signal bit. VM stores this
	 * in the 'cpu address' field associated with the
	 * external interrupt.
	 */
	subcode = S390_lowcore.cpu_addr;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;

	/*
	 * Get the token (= address of the task structure of the affected task).
	 */
	tsk = *(struct task_struct **) __LC_PFAULT_INTPARM;

	if (subcode & 0x0080) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (xchg(&tsk->thread.pfault_wait, -1) != 0) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			wake_up_process(tsk);
			put_task_struct(tsk);
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		get_task_struct(tsk);
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
			/* Completion interrupt was faster than the initial
			 * interrupt (swapped in a -1 for pfault_wait). Set
			 * pfault_wait back to zero and exit. This can be
			 * done safely because tsk is running in kernel
			 * mode and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			set_task_state(tsk, TASK_RUNNING);
			put_task_struct(tsk);
		} else
			set_tsk_need_resched(tsk);
	}
}
void __init pfault_irq_init(void)
{
	if (!MACHINE_IS_VM)
		return;

	/*
	 * Try to get pfault pseudo page faults going.
	 */
	if (register_early_external_interrupt(0x2603, pfault_interrupt,
					      &ext_int_pfault) != 0)
		panic("Couldn't request external interrupt 0x2603");

	if (pfault_init() == 0)
		return;

	/* Tough luck, no pfault. */
	pfault_disable = 1;
	unregister_early_external_interrupt(0x2603, pfault_interrupt,
					    &ext_int_pfault);
}
#endif