/*
 * arch/sh/mm/tlb-flush_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
extern void die(const char *, struct pt_regs *, long);
#define PFLAG(val,flag)	(( (val) & (flag) ) ? #flag : "" )
#define PPROT(flag)	PFLAG(pgprot_val(prot),flag)
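/*
 * Debug helpers: dump the protection bits, the VMA bounds and the
 * owning task for a faulting access.
 */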
static inline void print_prots(pgprot_t prot)
{
	printk("prot is 0x%08lx\n", pgprot_val(prot));

	printk("%s %s %s %s %s\n", PPROT(_PAGE_SHARED), PPROT(_PAGE_READ),
	       PPROT(_PAGE_EXECUTE), PPROT(_PAGE_WRITE), PPROT(_PAGE_USER));
}
static inline void print_vma(struct vm_area_struct *vma)
{
	printk("vma start 0x%08lx\n", vma->vm_start);
	printk("vma end   0x%08lx\n", vma->vm_end);

	print_prots(vma->vm_page_prot);
	printk("vm_flags 0x%08lx\n", vma->vm_flags);
}
static inline void print_task(struct task_struct *tsk)
{
	printk("Task pid %d\n", task_pid_nr(tsk));
}
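/*
 * Walk the page tables by hand to find the pte that maps 'address' in
 * 'mm'.  Returns NULL if any level of the walk is missing or the final
 * pte is not present.
 */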
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset(mm, address);
	if (pgd_none(*dir))
		return NULL;

	pud = pud_offset(dir, address);
	if (pud_none(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry))
		return NULL;

	return pte;
}
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long textaccess, unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	pte_t *pte;
	int fault;
	/*
	 * Note this is now called with interrupts still disabled
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled. This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */
	tsk = current;
	mm = tsk->mm;

	/* Not an IO address, so reenable interrupts */
	local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	/* TLB misses upon some cache flushes get done under cli() */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	if (!vma) {
		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
		       __FUNCTION__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		goto bad_area;
	}
	/* Check if the fault is in the VMA range */
	if (vma->vm_start <= address) {
		goto good_area;
	}

	if (!(vma->vm_flags & VM_GROWSDOWN)) {
		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
		       __FUNCTION__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		goto bad_area;
	}
	if (expand_stack(vma, address)) {
		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
		       __FUNCTION__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		goto bad_area;
	}
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
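	/*
	 * Make sure the access is actually permitted by the VMA:
	 * instruction fetches need VM_EXEC, writes need VM_WRITE and
	 * ordinary reads need VM_READ.
	 */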
	if (textaccess) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else {
		if (writeaccess) {
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
		} else {
			if (!(vma->vm_flags & VM_READ))
				goto bad_area;
		}
	}
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
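	/*
	 * handle_mm_fault() returns a mask of VM_FAULT_* bits:
	 * VM_FAULT_ERROR covers the OOM and SIGBUS failure cases, while
	 * VM_FAULT_MAJOR marks a fault that had to go to the backing store.
	 */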
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
	/* If we get here, the page fault has been handled.  Do the TLB refill
	   now from the newly-setup PTE, to avoid having to fault again right
	   away on the same instruction. */
	pte = lookup_pte(mm, address);
	if (!pte) {
		/* From empirical evidence, we can get here, due to
		   !pte_present(pte). (e.g. if a swap-in occurs, and the page
		   is swapped back out again before the process that wanted it
		   gets rescheduled?) */
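		/* In that case the refill is simply skipped; the access will
		   fault again and retry through the normal path. */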
		goto no_pte;
	}

	__do_tlb_refill(address, textaccess, pte);
no_pte:
	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	printk("fault:bad area\n");
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		siginfo_t info;
		/* This is really to help debug faults when starting
		 * usermode, so only need a few */
		printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
		       address, task_pid_nr(current), current->comm,
		       (unsigned long) regs->pc);
		if (is_global_init(tsk)) {
			panic("INIT had user mode bad_area\n");
		}
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}
272 printk("fault:No context\n");
274 /* Are we prepared to handle this kernel fault? */
275 fixup
= search_exception_tables(regs
->pc
);
277 regs
->pc
= fixup
->fixup
;
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);
	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	if (is_global_init(current)) {
		panic("INIT out of memory\n");
	}
	printk("fault:Out of memory\n");
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;
318 printk("fault:Do sigbus\n");
319 up_read(&mm
->mmap_sem
);
322 * Send a sigbus, regardless of whether we were in kernel
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}
void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
	/*
	 * This appears to get called once for every pte entry that gets
	 * established => I don't think it's efficient to try refilling the
	 * TLBs with the pages - some may not get accessed even.  Also, for
	 * executable pages, it is impossible to determine reliably here which
	 * TLB they should be mapped into (or both even).
	 *
	 * So, just do nothing here and handle faults on demand.  In the
	 * TLBMISS handling case, the refill is now done anyway after the pte
	 * has been fixed up, so that deals with most useful cases.
	 */
}
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	unsigned long long match, pteh = 0, lpage;
	unsigned long tlb;
	/*
	 * Sign-extend based on neff.
	 */
	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
	match |= lpage;
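	/*
	 * Scan both TLBs: read each slot's PTEH tag with getcfg and
	 * invalidate any slot that matches this ASID/page.
	 */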
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));
		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));
		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

	if (vma->vm_mm) {
		page &= PAGE_MASK;
		local_irq_save(flags);
		local_flush_tlb_one(get_asid(), page);
		local_irq_restore(flags);
	}
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	unsigned long flags;
	unsigned long long match, pteh = 0, pteh_epn, pteh_low;
	unsigned long tlb;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm;
	mm = vma->vm_mm;
	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);
	start &= PAGE_MASK;
	end &= PAGE_MASK;

	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;
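	/*
	 * Scan every ITLB/DTLB slot: an entry belongs to this range if its
	 * ASID/valid bits match and its EPN falls within [start, end].
	 */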
	/* Flush ITLB */
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}
	/* Flush DTLB */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}
	local_irq_restore(flags);
}
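/*
 * Flushing a whole mm: rather than scanning the TLB slots one by one,
 * drop the mm's ASID so a fresh context gets allocated on its next use.
 */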
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);
	cpu_context(cpu, mm) = NO_CONTEXT;
	if (mm == current->mm)
		activate_context(mm, cpu);

	local_irq_restore(flags);
}
void local_flush_tlb_all(void)
{
	/* Invalidate all, including shared pages, excluding fixed TLBs */
	unsigned long flags, tlb;

	local_irq_save(flags);

	/* Flush each ITLB entry */
	for_each_itlb_entry(tlb)
		__flush_tlb_slot(tlb);

	/* Flush each DTLB entry */
	for_each_dtlb_entry(tlb)
		__flush_tlb_slot(tlb);

	local_irq_restore(flags);
}
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* FIXME: Optimize this later.. */
	flush_tlb_all();
}