arch/ia64/mm/fault.c
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
extern int die(char *, struct pt_regs *, long);

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif
/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
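
	/* Walk the kernel page table (via pgd_offset_k(), i.e. init_mm) down to the PTE. */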
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}
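
/*
 * These must match the bit positions of VM_READ, VM_WRITE and VM_EXEC in
 * <linux/mm.h>; the preprocessor check below the good_area label enforces it.
 */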
#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
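
	/*
	 * The ISR execute and write bits say what kind of access faulted;
	 * turn them into the VM_EXEC/VM_WRITE permissions the vma must grant.
	 */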
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);

	/* mmap_sem is performance critical.... */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;
#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_sem (pfn_valid macro is called during mmap). There
	 * is no vma for region 5 addr's anyway, so skip getting the semaphore
	 * and go directly to the exception handling code.
	 */
	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle the kprobes on user space access instructions
	 */
	if (notify_page_fault(regs, TRAP_BRKPT))
		return;
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns vma such that address < vma->vm_end or NULL
	 *
	 * May find no vma, but could be that the last vm area is the
	 * register backing store that needs to expand upwards, in
	 * this case vma will be null, but prev_vma will be non-null
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif
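
	/*
	 * A read fault on a vma that is only VM_WRITE is tolerated here,
	 * presumably because ia64 page access rights have no write-only
	 * encoding, so a PROT_WRITE mapping is readable by the hardware too.
	 */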
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
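
	/*
	 * On VM_FAULT_RETRY the mmap_sem has already been dropped by
	 * handle_mm_fault(); if a fatal signal arrived meanwhile, just
	 * return and let the signal be delivered.
	 */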
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
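
		/*
		 * A second attempt is made at most once: clearing
		 * FAULT_FLAG_ALLOW_RETRY below keeps handle_mm_fault()
		 * from returning VM_FAULT_RETRY again.
		 */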
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;
  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;
  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
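
	/*
	 * User-mode fault: deliver a signal.  The ia64 siginfo carries the
	 * ISR (flagged with __ISR_VALID) so the handler can decode the
	 * faulting access.
	 */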
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}
  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non present translation that becomes
	 * stale. If that happens, the non present fault handler already purged the stale
	 * translation, which fixed the problem. So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;
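
	/*
	 * Out of memory while servicing a user-mode fault: drop mmap_sem and
	 * let the core VM's OOM handling decide how to proceed.
	 */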
  out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}