/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/kdebug.h>
extern void die (char *, struct pt_regs *, long);
/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
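	/*
	 * Walk the page-table levels top-down, bailing out at the first
	 * level that is absent or corrupt.
	 */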
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;
	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}
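/*
 * Low-level page-fault entry point: ADDRESS is the faulting virtual
 * address and ISR the interruption status register captured at fault time.
 */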
void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;
	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;
#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * hold the mmap_sem (the pfn_valid macro is called during mmap). There
	 * is no vma for region 5 addresses anyway, so skip taking the semaphore
	 * and go directly to the exception handling code.
	 */
	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif
	/*
	 * This is to handle kprobes on user-space access instructions.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
		       SIGSEGV) == NOTIFY_STOP)
		return;
	down_read(&mm->mmap_sem);
	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma)
		goto bad_area;

	/* find_vma_prev() returns a vma such that address < vma->vm_end, or NULL */
	if (address < vma->vm_start)
		goto check_expansion;
  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif
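	/*
	 * Translate the ISR's execute/write/read fault bits into the
	 * corresponding VM_EXEC/VM_WRITE/VM_READ bit positions, so the
	 * result can be compared against vma->vm_flags directly.
	 */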
	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)
		| (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT));
	if ((vma->vm_flags & mask) != mask)
		goto bad_area;
  survive:
	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
	      case VM_FAULT_MINOR:
		++current->min_flt;
		break;
	      case VM_FAULT_MAJOR:
		++current->maj_flt;
		break;
	      case VM_FAULT_SIGBUS:
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		signal = SIGBUS;
		goto bad_area;
	      case VM_FAULT_OOM:
		goto out_of_memory;
	      default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return;
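	/*
	 * The address fell below the vma returned by find_vma_prev(); it is
	 * only valid if it extends a growable mapping: prev_vma growing up
	 * (the register backing store) or vma growing down (the memory stack).
	 */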
  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;
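	/*
	 * The fault cannot be resolved: drop the semaphore and either signal
	 * the user task or fall through to kernel-fault handling.
	 */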
  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
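	/* Fault in user mode: deliver the signal with a filled-in siginfo. */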
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}
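	/*
	 * Kernel-mode fault, or no usable user context: check for benign
	 * causes and the exception tables before declaring an oops.
	 */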
  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale. If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem. So, we check whether the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;
	if (ia64_done_with_exception(regs))
		return;
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
237 die("Oops", regs
, isr
);
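	/*
	 * handle_mm_fault() reported VM_FAULT_OOM: let init retry the fault
	 * indefinitely; any other process is killed.
	 */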
  out_of_memory:
	up_read(&mm->mmap_sem);
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;
}