/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * Based on linux/arch/sh/mm/fault.c:
 *   Copyright (C) 1999 Niibe Yutaka
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>

#include <asm/mmu_context.h>
#include <asm/sysreg.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (!user_mode(regs)) {
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif
int exception_trace = 1;
/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * ecr is the Exception Cause Register. Possible values are:
 *   6:  Protection fault (instruction access)
 *   15: Protection fault (read access)
 *   16: Protection fault (write access)
 *   20: Page not found (instruction access)
 *   24: Page not found (read access)
 *   28: Page not found (write access)
 */
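/*
 * The causes pair up by access type: each "page not found" code
 * (20/24/28) requires the same vm_flags permission check as the
 * corresponding protection fault (6/15/16), which is why the switch
 * below shares one case body per access type.
 */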
asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	unsigned long address;
	unsigned long page;
	long signr;
	int writeaccess;
	int code;
	int fault;
	if (notify_page_fault(regs, ecr))
		return;
	/* TLBEAR holds the virtual address that caused the fault */
	address = sysreg_read(TLBEAR);

	tsk = current;
	mm = tsk->mm;

	signr = SIGSEGV;
	code = SEGV_MAPERR;
	/*
	 * If we're in an interrupt or have no user context, we must
	 * not take the fault...
	 */
	if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
		goto no_context;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
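	/*
	 * A note on the checks above: find_vma() returns the first vma
	 * that ends above the fault address, so an address below
	 * vma->vm_start is valid only as stack growth. VM_GROWSDOWN
	 * gates that case, and expand_stack() moves vm_start down to
	 * cover the faulting address.
	 */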
	/*
	 * Ok, we have a good vm_area for this memory access, so we
	 * can handle it...
	 */
good_area:
	code = SEGV_ACCERR;
	writeaccess = 0;

	switch (ecr) {
	case ECR_PROTECTION_X:
	case ECR_TLB_MISS_X:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case ECR_PROTECTION_R:
	case ECR_TLB_MISS_R:
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
		break;
	case ECR_PROTECTION_W:
	case ECR_TLB_MISS_W:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		writeaccess = 1;
		break;
127 panic("Unhandled case %lu in do_page_fault!", ecr
);
	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
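	/*
	 * A major fault is one that required I/O to resolve (e.g.
	 * reading the page in from backing store); a minor fault was
	 * satisfied without I/O. These counters feed per-task
	 * statistics such as those shown in /proc/<pid>/stat.
	 */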
	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory
	 * map. Fix it, but check if it's kernel or user first...
	 */
bad_area:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		if (exception_trace && printk_ratelimit())
161 printk("%s%s[%d]: segfault at %08lx pc %08lx "
162 "sp %08lx ecr %lu\n",
163 is_global_init(tsk
) ? KERN_EMERG
: KERN_INFO
,
164 tsk
->comm
, tsk
->pid
, address
, regs
->pc
,
		_exception(SIGSEGV, regs, code, address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}
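	/*
	 * Exception table entries pair an instruction address that may
	 * legitimately fault (e.g. a userspace access in
	 * copy_from_user()) with a fixup address; resuming at
	 * fixup->fixup lets such code return -EFAULT instead of
	 * oopsing the kernel.
	 */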
	/*
	 * Oops. The kernel tried to access some bad page. We'll have
	 * to terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT
		       "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	page = sysreg_read(PTBR);
	printk(KERN_ALERT "ptbr = %08lx", page);
	if (address >= TASK_SIZE)
		page = (unsigned long)swapper_pg_dir;
	if (page) {
		page = ((unsigned long *)page)[address >> 22];
		printk(" pgd = %08lx", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
			printk(" pte = %08lx", page);
		}
	}
	printk("\n");
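	/*
	 * The dump above mirrors the two-level page table walk: the
	 * 32-bit virtual address splits 10/10/12, so bits 31:22
	 * (address >> 22) index the pgd, and bits 21:12 (masked with
	 * 0x003ff000, shifted down by PAGE_SHIFT) index the pte. For
	 * example, a fault at 0x9f401234 reads pgd slot 0x27d, then
	 * pte slot 0x001 of the page table it points to.
	 */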
208 die("Kernel access of bad area", regs
, signr
);
	/*
	 * We ran out of memory, or some other thing happened to us
	 * that made us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
222 printk("VM: Killing process %s\n", tsk
->comm
);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;
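	/*
	 * Note that init is handled specially above: rather than being
	 * killed on OOM, it yields and retries the fault via the
	 * survive label, since losing init would take the system down.
	 */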
do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	signr = SIGBUS;
	code = BUS_ADRERR;
	if (!user_mode(regs))
		goto no_context;

	if (exception_trace)
237 printk("%s%s[%d]: bus error at %08lx pc %08lx "
238 "sp %08lx ecr %lu\n",
239 is_global_init(tsk
) ? KERN_EMERG
: KERN_INFO
,
240 tsk
->comm
, tsk
->pid
, address
, regs
->pc
,
	_exception(SIGBUS, regs, BUS_ADRERR, address);
}
asmlinkage void do_bus_error(unsigned long addr, int write_access,
			     struct pt_regs *regs)
{
	printk(KERN_ALERT
	       "Bus error at physical address 0x%08lx (%s access)\n",
	       addr, write_access ? "write" : "read");
	printk(KERN_INFO "DTLB dump:\n");
	dump_dtlb();
254 die("Bus Error", regs
, SIGKILL
);
/*
 * This functionality is currently not possible to implement because
 * we're using segmentation to ensure a fixed mapping of the kernel
 * virtual address space.
 *
 * It would be possible to implement this, but it would require us to
 * disable segmentation at startup and load the kernel mappings into
 * the TLB like any other pages. There will be lots of trickery to
 * avoid recursive invocation of the TLB miss handler, though...
 */
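/*
 * Background, assuming the SH-like MMU this port derives from (see
 * the header above): the kernel runs from fixed, untranslated
 * segments that map physical memory directly, so individual kernel
 * pages cannot be unmapped by editing page tables the way
 * CONFIG_DEBUG_PAGEALLOC expects.
 */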
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* Intentionally a no-op; see the comment above. */
}
EXPORT_SYMBOL(kernel_map_pages);
#endif