/*
 * arch/sh/mm/tlbflush_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

extern void die(const char *, struct pt_regs *, long);

#define PFLAG(val, flag)	(((val) & (flag)) ? #flag : "")
#define PPROT(flag)		PFLAG(pgprot_val(prot), flag)
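
/*
 * Debug helpers: dump the protection bits, the VMA bounds and the owning
 * task for a fault.  Only used from the DEBUG_FAULT paths below.
 */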
static inline void print_prots(pgprot_t prot)
{
	printk("prot is 0x%08lx\n", pgprot_val(prot));

	printk("%s %s %s %s %s\n", PPROT(_PAGE_SHARED), PPROT(_PAGE_READ),
	       PPROT(_PAGE_EXECUTE), PPROT(_PAGE_WRITE), PPROT(_PAGE_USER));
}

static inline void print_vma(struct vm_area_struct *vma)
{
	printk("vma start 0x%08lx\n", vma->vm_start);
	printk("vma end 0x%08lx\n", vma->vm_end);

	print_prots(vma->vm_page_prot);
	printk("vm_flags 0x%08lx\n", vma->vm_flags);
}

static inline void print_task(struct task_struct *tsk)
{
	printk("Task pid %d\n", task_pid_nr(tsk));
}
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset(mm, address);
	if (pgd_none(*dir))
		return NULL;

	pud = pud_offset(dir, address);
	if (pud_none(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry))
		return NULL;

	return pte;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
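/*
 * textaccess is set for ITLB (instruction fetch) misses and writeaccess
 * for stores; both are also used below to pick the permission bits to
 * check and which TLB to refill.
 */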
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long textaccess, unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	pte_t *pte;
	int fault;

	/* SIM
	 * Note this is now called with interrupts still disabled
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled. This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */

	tsk = current;
	mm = tsk->mm;

	/* Not an IO address, so reenable interrupts */
	local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	/* TLB misses upon some cache flushes get done under cli() */
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);

	if (!vma) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %lu writeaccess %lu\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}

	if (vma->vm_start <= address) {
		goto good_area;
	}

	if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %lu writeaccess %lu\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);

		print_vma(vma);
#endif
		goto bad_area;
	}

	if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %lu writeaccess %lu\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	if (textaccess) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else {
		if (writeaccess) {
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
		} else {
			if (!(vma->vm_flags & VM_READ))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	/*
	 * If we get here, the page fault has been handled.  Do the TLB refill
	 * now from the newly-setup PTE, to avoid having to fault again right
	 * away on the same instruction.
	 */
	pte = lookup_pte(mm, address);
	if (!pte) {
		/*
		 * From empirical evidence, we can get here, due to
		 * !pte_present(pte).  (e.g. if a swap-in occurs, and the page
		 * is swapped back out again before the process that wanted it
		 * gets rescheduled?)
		 */
		goto no_pte;
	}

	__do_tlb_refill(address, textaccess, pte);

no_pte:
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
#ifdef DEBUG_FAULT
	printk("fault:bad area\n");
#endif
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		static int count = 0;
		siginfo_t info;

		if (count < 4) {
			/* This is really to help debug faults when starting
			 * usermode, so only need a few */
			count++;
			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
			       address, task_pid_nr(current), current->comm,
			       (unsigned long) regs->pc);
#if 0
			show_regs(regs);
#endif
		}

		if (is_global_init(tsk)) {
			panic("INIT had user mode bad_area\n");
		}

		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* si_code must be initialized before reaching userspace */
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
#ifdef DEBUG_FAULT
	printk("fault:No context\n");
#endif
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);

	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	printk("fault:Out of memory\n");
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		/* Never OOM-kill init; back off and retry the fault */
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	printk("fault:Do sigbus\n");
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
	/*
	 * This appears to get called once for every pte entry that gets
	 * established => I don't think it's efficient to try refilling the
	 * TLBs with the pages - some may not get accessed even.  Also, for
	 * executable pages, it is impossible to determine reliably here which
	 * TLB they should be mapped into (or both even).
	 *
	 * So, just do nothing here and handle faults on demand.  In the
	 * TLBMISS handling case, the refill is now done anyway after the pte
	 * has been fixed up, so that deals with most useful cases.
	 */
}
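
/*
 * Search both the ITLB and the DTLB for an entry mapping @page under
 * @asid and flush the matching slot, if any.  Callers are expected to
 * have disabled interrupts.
 */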
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	unsigned long long match, pteh = 0, lpage;
	unsigned long tlb;

	/*
	 * Sign-extend based on neff.
	 */
	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
	match |= lpage;
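
	/*
	 * match now holds the exact PTEH value (EPN | ASID | valid) that an
	 * entry mapping this page would carry, so a straight compare against
	 * each slot's PTEH suffices.
	 */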

	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}

	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}
}
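
/* Flush a single page mapping for the current ASID from both TLBs. */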
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

	if (vma->vm_mm) {
		page &= PAGE_MASK;
		local_irq_save(flags);
		local_flush_tlb_one(get_asid(), page);
		local_irq_restore(flags);
	}
}
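
/*
 * Walk every ITLB and DTLB slot and flush each entry whose ASID matches
 * this mm and whose EPN falls inside [start, end].
 */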
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	unsigned long flags;
	unsigned long long match, pteh = 0, pteh_epn, pteh_low;
	unsigned long tlb;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm;

	mm = vma->vm_mm;
	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);

	start &= PAGE_MASK;
	end &= PAGE_MASK;

	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;

	/* Flush ITLB */
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	/* Flush DTLB */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}
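
/*
 * Invalidate all of this mm's mappings cheaply: drop its ASID instead of
 * walking the TLB.  If the mm is current, activate_context() assigns a
 * fresh ASID immediately; otherwise stale entries simply die with the
 * old ASID when the mm is next scheduled.
 */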
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);

	cpu_context(cpu, mm) = NO_CONTEXT;
	if (mm == current->mm)
		activate_context(mm, cpu);

	local_irq_restore(flags);
}

void local_flush_tlb_all(void)
{
	/* Invalidate all, including shared pages, excluding fixed TLBs */
	unsigned long flags, tlb;

	local_irq_save(flags);

	/* Flush each ITLB entry */
	for_each_itlb_entry(tlb)
		__flush_tlb_slot(tlb);

	/* Flush each DTLB entry */
	for_each_dtlb_entry(tlb)
		__flush_tlb_slot(tlb);

	local_irq_restore(flags);
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* FIXME: Optimize this later.. */
	flush_tlb_all();
}