/*
 * arch/sh/mm/tlb-flush_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
extern void die(const char *,struct pt_regs *,long);
#define PFLAG(val,flag)   (( (val) & (flag) ) ? #flag : "" )
#define PPROT(flag) PFLAG(pgprot_val(prot),flag)
static inline void print_prots(pgprot_t prot)
{
	printk("prot is 0x%08lx\n", pgprot_val(prot));

	printk("%s %s %s %s %s\n", PPROT(_PAGE_SHARED), PPROT(_PAGE_READ),
	       PPROT(_PAGE_EXECUTE), PPROT(_PAGE_WRITE), PPROT(_PAGE_USER));
}
static inline void print_vma(struct vm_area_struct *vma)
{
	printk("vma start 0x%08lx\n", vma->vm_start);
	printk("vma end   0x%08lx\n", vma->vm_end);

	print_prots(vma->vm_page_prot);
	printk("vm_flags 0x%08lx\n", vma->vm_flags);
}
static inline void print_task(struct task_struct *tsk)
{
	printk("Task pid %d\n", task_pid_nr(tsk));
}
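
/*
 * Walk the page tables for a user virtual address and return a pointer
 * to the leaf PTE, or NULL if no present mapping exists at any level.
 * The walk goes pgd -> pud -> pmd -> pte; the intermediate levels that
 * sh64 does not use are folded away by the generic pagetable headers.
 */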
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset(mm, address);
	if (pgd_none(*dir))
		return NULL;

	pud = pud_offset(dir, address);
	if (pud_none(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry))
		return NULL;

	return pte;
}
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long textaccess, unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	const struct exception_table_entry *fixup;
	pte_t *pte;
	int fault;

	/*
	 * Note this is now called with interrupts still disabled
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled. This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */
	tsk = current;
	mm = tsk->mm;

	/* Not an IO address, so reenable interrupts */
	local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	/* TLB misses upon some cache flushes get done under cli() */
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);

	if (!vma) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}

	if (vma->vm_start <= address) {
		goto good_area;
	}

	if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		print_vma(vma);
		show_regs(regs);
#endif
		goto bad_area;
	}

	if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	/* ITLB misses check VM_EXEC; DTLB misses check VM_WRITE/VM_READ */
	if (textaccess) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else {
		if (writeaccess) {
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
		} else {
			if (!(vma->vm_flags & VM_READ))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
			      regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
			      regs, address);
	}

	/* If we get here, the page fault has been handled.  Do the TLB refill
	   now from the newly-setup PTE, to avoid having to fault again right
	   away on the same instruction. */
	pte = lookup_pte(mm, address);
	if (!pte) {
		/* From empirical evidence, we can get here, due to
		   !pte_present(pte). (e.g. if a swap-in occurs, and the page
		   is swapped back out again before the process that wanted it
		   gets rescheduled?) */
		goto no_pte;
	}

	__do_tlb_refill(address, textaccess, pte);

no_pte:

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
#ifdef DEBUG_FAULT
	printk("fault:bad area\n");
#endif
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		static int count = 0;
		siginfo_t info;
		/* This is really to help debug faults when starting
		 * usermode, so only need a few */
		if (count < 4) {
			count++;
			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
			       address, task_pid_nr(current), current->comm,
			       (unsigned long) regs->pc);
		}
		if (is_global_init(tsk)) {
			panic("INIT had user mode bad_area\n");
		}
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
#ifdef DEBUG_FAULT
	printk("fault:No context\n");
#endif
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	if (is_global_init(current)) {
		panic("INIT out of memory\n");
		yield();
		goto survive;
	}
	printk("fault:Out of memory\n");
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	printk("fault:Do sigbus\n");
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}
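
/*
 * Flush a single page translation from both the ITLB and DTLB.  Each
 * slot's PTEH value is read back with getcfg and compared against a
 * key built from the ASID, the valid bit and the sign-extended
 * effective page number, roughly:
 *
 *	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID | epn
 *
 * so only an exact ASID+page match is invalidated.
 */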
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	unsigned long long match, pteh = 0, lpage;
	unsigned long tlb;

	/*
	 * Sign-extend based on neff.
	 */
	lpage = neff_sign_extend(page);
	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
	match |= lpage;

	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}

	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}
}
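
/*
 * Flush the translation for a single user page of @vma, keyed by the
 * current ASID.  VMAs without an address space (vm_mm == NULL) are
 * ignored.
 */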
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

	if (vma->vm_mm) {
		page &= PAGE_MASK;
		local_irq_save(flags);
		local_flush_tlb_one(get_asid(), page);
		local_irq_restore(flags);
	}
}
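
/*
 * Flush all translations covering [start, end] in @vma's address
 * space.  Unlike local_flush_tlb_one(), no single match key suffices
 * here, so each ITLB/DTLB slot is split into its EPN and attribute
 * halves: the attributes must match the ASID+valid key and the EPN
 * must fall within the flush window.
 */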
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	unsigned long flags;
	unsigned long long match, pteh = 0, pteh_epn, pteh_low;
	unsigned long tlb;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm;

	mm = vma->vm_mm;
	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);

	start &= PAGE_MASK;
	end &= PAGE_MASK;

	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;

	/* Flush ITLB */
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	/* Flush DTLB */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}
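
/*
 * Flush an entire address space.  Rather than scanning TLB slots, the
 * mm's context is reset to NO_CONTEXT so that a fresh ASID is
 * allocated on the next activation, implicitly orphaning all of the
 * old entries.
 */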
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);

	cpu_context(cpu, mm) = NO_CONTEXT;
	if (mm == current->mm)
		activate_context(mm, cpu);

	local_irq_restore(flags);
}
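
/*
 * Flush every ITLB and DTLB slot outright; no ASID matching is done.
 * The fixed (wired) TLB entries are outside the iterators and survive.
 */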
void local_flush_tlb_all(void)
{
	/* Invalidate all, including shared pages, excluding fixed TLBs */
	unsigned long flags, tlb;

	local_irq_save(flags);

	/* Flush each ITLB entry */
	for_each_itlb_entry(tlb)
		__flush_tlb_slot(tlb);

	/* Flush each DTLB entry */
	for_each_dtlb_entry(tlb)
		__flush_tlb_slot(tlb);

	local_irq_restore(flags);
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* FIXME: Optimize this later.. */
	flush_tlb_all();
}
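
/*
 * Nothing to do here: on sh64 the TLB refill for a newly established
 * PTE is driven directly from do_page_fault() via __do_tlb_refill(),
 * so this generic hook can remain empty.
 */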
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
}