/*
 * arch/score/mm/tlb-score.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#define TLBSIZE 32
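
/*
 * Global ASID allocation counter, presumably shared with the
 * get_new_mmu_context() helper in asm/mmu_context.h and exported so
 * that modules pulling in those inline helpers can link against it.
 */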
unsigned long asid_cache = ASID_FIRST_VERSION;
EXPORT_SYMBOL(asid_cache);
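
/*
 * Invalidate every unlocked TLB entry on this CPU: entries below
 * tlblock_get() are left alone, the rest are overwritten with an
 * invalid mapping (PECTX = 0, PEVN = KSEG1).
 */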
void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ASID;
        int entry;

        local_irq_save(flags);
        old_ASID = pevn_get() & ASID_MASK;
        pectx_set(0);                   /* invalid */
        entry = tlblock_get();          /* skip locked entries */

        for (; entry < TLBSIZE; entry++) {
                tlbpt_set(entry);
                pevn_set(KSEG1);
                barrier();
                tlb_write_indexed();
        }
        pevn_set(old_ASID);
        local_irq_restore(flags);
}

/*
 * If mm is currently active_mm, we can't really drop it.  Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm)
{
        unsigned long flags;

        local_irq_save(flags);
        get_new_mmu_context(mm);
        pevn_set(mm->context & ASID_MASK);
        local_irq_restore(flags);
}
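
/*
 * For reference, get_new_mmu_context() (asm/mmu_context.h) bumps the
 * global asid_cache and assigns the new value to mm->context, flushing
 * the whole TLB when the ASID space wraps.  A rough sketch, assumed
 * from the MIPS-style helpers this file mirrors, not a verbatim copy
 * of the header:
 *
 *      asid = asid_cache + ASID_INC;
 *      if (!(asid & ASID_MASK)) {              // ASIDs exhausted
 *              local_flush_tlb_all();          // start a new ASID cycle
 *              if (!asid)                      // version counter wrapped
 *                      asid = ASID_FIRST_VERSION;
 *      }
 *      mm->context = asid_cache = asid;
 */

/*
 * Flush all of mm's user mappings by retiring its ASID; mm->context == 0
 * means no ASID was ever assigned, so there is nothing to flush.
 */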
void local_flush_tlb_mm(struct mm_struct *mm)
{
        if (mm->context != 0)
                drop_mmu_context(mm);
}
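
/*
 * Flush the user mappings covering [start, end) in vma's address space.
 * Ranges that fit in the TLB are probed and invalidated page by page
 * under the vma's ASID; larger ranges are handled by simply giving the
 * mm a fresh ASID.
 */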
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long vma_mm_context = mm->context;

        if (mm->context != 0) {
                unsigned long flags;
                int size;

                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                if (size <= TLBSIZE) {
                        int oldpid = pevn_get() & ASID_MASK;
                        int newpid = vma_mm_context & ASID_MASK;

                        start &= PAGE_MASK;
                        end += (PAGE_SIZE - 1);
                        end &= PAGE_MASK;
                        while (start < end) {
                                int idx;

                                pevn_set(start | newpid);
                                start += PAGE_SIZE;
                                barrier();
                                tlb_probe();
                                idx = tlbpt_get();
                                pectx_set(0);
                                pevn_set(KSEG1);
                                if (idx < 0)
                                        continue;
                                tlb_write_indexed();
                        }
                        pevn_set(oldpid);
                } else {
                        /* Bigger than TLBSIZE, get new ASID directly */
                        get_new_mmu_context(mm);
                        if (mm == current->active_mm)
                                pevn_set(vma_mm_context & ASID_MASK);
                }
                local_irq_restore(flags);
        }
}
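
/*
 * Flush kernel mappings in [start, end).  As above, ranges that fit in
 * the TLB are probed page by page; anything larger falls back to a
 * full flush.
 */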
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long flags;
        int size;

        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (size <= TLBSIZE) {
                int pid = pevn_get();

                start &= PAGE_MASK;
                end += PAGE_SIZE - 1;
                end &= PAGE_MASK;

                while (start < end) {
                        long idx;

                        pevn_set(start);
                        start += PAGE_SIZE;
                        tlb_probe();
                        idx = tlbpt_get();
                        if (idx < 0)
                                continue;
                        pectx_set(0);
                        pevn_set(KSEG1);
                        barrier();
                        tlb_write_indexed();
                }
                pevn_set(pid);
        } else {
                local_flush_tlb_all();
        }

        local_irq_restore(flags);
}
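
/*
 * Flush the single user page 'page' from vma's address space: probe
 * for it under the vma's ASID and invalidate the entry on a hit.
 */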
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        if (vma && vma->vm_mm->context != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;
                unsigned long vma_ASID = vma->vm_mm->context;

                newpid = vma_ASID & ASID_MASK;
                page &= PAGE_MASK;
                local_irq_save(flags);
                oldpid = pevn_get() & ASID_MASK;
                pevn_set(page | newpid);
                barrier();
                tlb_probe();
                idx = tlbpt_get();
                pectx_set(0);
                pevn_set(KSEG1);
                if (idx < 0)            /* p_bit(31) - 1: miss, 0: hit */
                        goto finish;
                barrier();
                tlb_write_indexed();
finish:
                pevn_set(oldpid);
                local_irq_restore(flags);
        }
}

/*
 * This one is only used for pages with the global bit set so we don't
 * care much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        local_irq_save(flags);
        oldpid = pevn_get();
        page &= (PAGE_MASK << 1);
        pevn_set(page);
        barrier();
        tlb_probe();
        idx = tlbpt_get();
        pectx_set(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                pevn_set(KSEG1);
                barrier();
                tlb_write_indexed();
        }
        pevn_set(oldpid);
        local_irq_restore(flags);
}
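
/*
 * Install or refresh the TLB entry for 'address' after a fault: probe
 * for an existing entry under the current ASID and overwrite it, or
 * write the new PTE into a random slot on a miss.
 */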
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        int idx, pid;

        /*
         * Handle the debugger faulting in pages for the debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        pid = pevn_get() & ASID_MASK;

        local_irq_save(flags);
        address &= PAGE_MASK;
        pevn_set(address | pid);
        barrier();
        tlb_probe();
        idx = tlbpt_get();
        pectx_set(pte_val(pte));
        pevn_set(address | pid);
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();

        pevn_set(pid);
        local_irq_restore(flags);
}
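
/*
 * Boot-time TLB setup: clear the TLB lock boundary, flush every entry,
 * and copy the fast TLB refill handler to offset 0x100 of the exception
 * vector area, flushing the icache over the copied range.
 */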
void __cpuinit tlb_init(void)
{
        tlblock_set(0);
        local_flush_tlb_all();
        memcpy((void *)(EXCEPTION_VECTOR_BASE_ADDR + 0x100),
                        &score7_FTLB_refill_Handler, 0xFC);
        flush_icache_range(EXCEPTION_VECTOR_BASE_ADDR + 0x100,
                        EXCEPTION_VECTOR_BASE_ADDR + 0x1FC);
}