arch/mips/mm/tlb-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>
extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
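/*
 * Worked example (assuming 4kB pages, i.e. PAGE_SHIFT == 12): entry 0
 * gets CKSEG0 + 0x0000, entry 1 gets CKSEG0 + 0x2000, and so on.  The
 * shift is PAGE_SHIFT + 1 because one EntryHi VPN2 covers an even/odd
 * pair of pages, and CKSEG0 addresses are unmapped, so these dummy
 * VPN2 values can never match a real translation.
 */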
/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
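/*
 * On an SMTC (MT) core the TLB is shared by all thread contexts, so
 * masking local interrupts is not enough to make the index/entryhi/
 * entrylo register dance below atomic.  ENTER_CRITICAL therefore also
 * stops the other VPEs with dvpe(), and EXIT_CRITICAL restarts them
 * with evpe() once the TLB registers are consistent again.
 */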
#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
 * unfortunately, itlb is not totally transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); }
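/*
 * Writing 4 to the c0 diagnostic register appears to be the Loongson2
 * ITLB-invalidate hook (bit 2 acting as the ITLB clear bit); the _VM
 * variant skips the flush for mappings that were never executable.
 */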
#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif
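/*
 * R4k-style TLBs have no "invalidate all" operation, so a full flush
 * means overwriting every non-wired slot with a unique unmapped VPN2
 * (see UNIQUE_ENTRYHI above) and zeroed EntryLo words, i.e. with the
 * valid bit clear.  Wired entries below c0_wired are left alone.
 */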
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
/*
 * All entries common to a mm share an asid.  To effectively flush
 * these entries, we just bump the asid.
 */
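/*
 * drop_mmu_context() hands the mm a fresh ASID on this CPU, so its old
 * entries can never match again; when the ASID space wraps it falls
 * back to local_flush_tlb_all().  This is far cheaper than probing the
 * TLB for every page the mm might have mapped.
 */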
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0)
		drop_mmu_context(mm, cpu);

	preempt_enable();
}
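/*
 * Each TLB entry maps an even/odd page pair, which is why the page
 * count is halved and start/end are aligned with (PAGE_MASK << 1)
 * below.  If more than half of the TLB would have to be probed, it is
 * cheaper to give the mm a new ASID instead.
 */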
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		ENTER_CRITICAL(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		EXIT_CRITICAL(flags);
	}
}
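/*
 * Same probe-and-evict loop as above, but intended for global kernel
 * mappings: EntryHi is loaded with the bare VPN2 and no ASID, and the
 * fallback for large ranges is a full local_flush_tlb_all().
 */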
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
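/*
 * Evict at most one entry: probe for the pair-aligned page under the
 * mm's ASID and, if it is resident, knock it out via the usual
 * UNIQUE_ENTRYHI trick.  A negative index from the probe simply means
 * the page was not in the TLB.
 */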
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		FLUSH_ITLB_VM(vma);
		EXIT_CRITICAL(flags);
	}
}
/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
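/*
 * The refill below writes both PTEs of the even/odd pair into
 * EntryLo0/EntryLo1.  The "pte_val(*ptep) >> 6" conversion assumes this
 * kernel's 32-bit software PTE layout, where the hardware fields (PFN,
 * C, D, V, G) sit 6 bits up; with 64-bit physical addresses on MIPS32
 * R1 the hardware half of the PTE lives in pte_high instead.
 */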
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
	write_c0_entrylo0(ptep->pte_high);
	ptep++;
	write_c0_entrylo1(ptep->pte_high);
#else
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	FLUSH_ITLB_VM(vma);
	EXIT_CRITICAL(flags);
}
#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
				       unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	ENTER_CRITICAL(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	EXIT_CRITICAL(flags);
}
#endif
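/*
 * Wired entries occupy indices below c0_wired and are never victims of
 * tlb_write_random(), which makes them the right tool for fixed
 * mappings (e.g. I/O windows) that must survive all the flushes above.
 */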
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}
/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

static int temp_tlb_entry __initdata;
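/*
 * Temporary entries are handed out from the top of the TLB downwards
 * (temp_tlb_entry starts at tlbsize - 1, see tlb_init()), so they only
 * collide with the wired region when the TLB is genuinely full.
 */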
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	EXIT_CRITICAL(flags);
	return ret;
}
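/*
 * On MIPS32/MIPS64 CPUs the TLB geometry is described by Config1: the
 * MMU size field in bits 30:25 holds the number of entries minus one,
 * and a zero MMU type field in Config means there is no TLB at all.
 * Legacy R4k-class parts have no Config1, so their entry count comes
 * from CPU probing instead.
 */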
static void __init probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU, the Config 1
	 * register is not supported and we assume R4k style.  CPU probing
	 * has already figured out the number of TLB entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If the TLB is shared in an SMTC system, the total size has
	 * already been calculated and written into cpu_data tlbsize.
	 */
	if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
		return;
#endif /* CONFIG_MIPS_MT_SMTC */

	reg = read_c0_config1();
	if (!((config >> 7) & 3))
		panic("No TLB present");

	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}
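/*
 * "ntlb=N" on the kernel command line restricts the kernel to the
 * first N TLB entries; tlb_init() below wires the remaining ones so
 * that tlb_write_random() can never select them.  Mainly a debugging
 * aid.
 */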
static int __initdata ntlb = 0;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);
void __init tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set for 4kB pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	write_c0_framemask(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}
	build_tlb_refill_handler();
}