arch/sh/mm/tlbflush_64.c

/*
 * arch/sh/mm/tlb-flush_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

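/*
 * Flush a single (ASID, page) translation. The flush is a software walk:
 * each ITLB and DTLB slot is read back with getcfg, and any slot whose
 * PTEH image (valid bit, ASID and sign-extended EPN) matches the request
 * is torn down with __flush_tlb_slot().
 */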
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
        unsigned long long match, pteh = 0, lpage;
        unsigned long tlb;

        /*
         * Sign-extend based on neff.
         */
        lpage = neff_sign_extend(page);
        match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
        match |= lpage;

        for_each_itlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                if (pteh == match) {
                        __flush_tlb_slot(tlb);
                        break;
                }
        }

        for_each_dtlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                if (pteh == match) {
                        __flush_tlb_slot(tlb);
                        break;
                }
        }
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        unsigned long flags;

        if (vma->vm_mm) {
                page &= PAGE_MASK;
                local_irq_save(flags);
                local_flush_tlb_one(get_asid(), page);
                local_irq_restore(flags);
        }
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        unsigned long flags;
        unsigned long long match, pteh = 0, pteh_epn, pteh_low;
        unsigned long tlb;
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm;

        mm = vma->vm_mm;
        if (cpu_context(cpu, mm) == NO_CONTEXT)
                return;

        local_irq_save(flags);

        start &= PAGE_MASK;
        end &= PAGE_MASK;

        match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;

        /* Flush ITLB */
        for_each_itlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                pteh_epn = pteh & PAGE_MASK;
                pteh_low = pteh & ~PAGE_MASK;

                if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
                        __flush_tlb_slot(tlb);
        }

        /* Flush DTLB */
        for_each_dtlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                pteh_epn = pteh & PAGE_MASK;
                pteh_low = pteh & ~PAGE_MASK;

                if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
                        __flush_tlb_slot(tlb);
        }

        local_irq_restore(flags);
}

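/*
 * Flushing a whole mm does not walk the TLB at all: the mm's context is
 * dropped to NO_CONTEXT so the old ASID is simply abandoned and stale
 * entries are never matched again; if the mm is the one currently running,
 * activate_context() assigns it a fresh ASID straight away.
 */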
void local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long flags;
        unsigned int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) == NO_CONTEXT)
                return;

        local_irq_save(flags);

        cpu_context(cpu, mm) = NO_CONTEXT;
        if (mm == current->mm)
                activate_context(mm, cpu);

        local_irq_restore(flags);
}

void local_flush_tlb_all(void)
{
        /* Invalidate all, including shared pages, excluding fixed TLBs */
        unsigned long flags, tlb;

        local_irq_save(flags);

        /* Flush each ITLB entry */
        for_each_itlb_entry(tlb)
                __flush_tlb_slot(tlb);

        /* Flush each DTLB entry */
        for_each_dtlb_entry(tlb)
                __flush_tlb_slot(tlb);

        local_irq_restore(flags);
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* FIXME: Optimize this later.. */
        flush_tlb_all();
}

void __flush_tlb_global(void)
{
        flush_tlb_all();
}
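
/*
 * How these routines are reached (a sketch, assuming the usual SH
 * arrangement in <asm/tlbflush.h>): on !CONFIG_SMP builds the generic
 * entry points map straight onto the local_*() implementations above,
 * e.g.
 *
 *      #define flush_tlb_all()                 local_flush_tlb_all()
 *      #define flush_tlb_mm(mm)                local_flush_tlb_mm(mm)
 *      #define flush_tlb_page(vma, page)       local_flush_tlb_page(vma, page)
 *
 * while SMP builds provide flush_tlb_*() wrappers that broadcast the
 * corresponding local_*() call to the other CPUs.
 */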