arch/mips/mm/tlb-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
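
/*
 * CKSEG0 is an unmapped segment, so an EntryHi derived from it can
 * never match a real translated access; shifting the index by
 * PAGE_SHIFT + 1 (each entry spans a pair of pages) gives every
 * entry a distinct VPN2.
 */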

/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
        "nop; nop; nop; nop; nop; nop;\n\t" \
        ".set reorder\n\t")

/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags); \
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
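
/*
 * Under SMTC several thread contexts share one TLB, so the critical
 * sections above cannot rely on masking local interrupts alone:
 * ENTER_CRITICAL additionally disables the other VPEs via dvpe() and
 * EXIT_CRITICAL re-enables them via evpe().  On everything else plain
 * local_irq_save()/local_irq_restore() is sufficient.
 */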

void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = read_c0_wired();
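
        /*
         * Wired entries hold fixed translations (e.g. ones installed
         * with add_wired_entry()), so the flush starts just above the
         * wired region and leaves those entries intact.
         */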

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);
}

/*
 * All entries common to a mm share an asid.  To effectively flush
 * these entries, we just bump the asid.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu;

        preempt_disable();

        cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                drop_mmu_context(mm, cpu);
        }

        preempt_enable();
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long flags;
                int size;

                ENTER_CRITICAL(flags);
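                /*
                 * One TLB entry maps an even/odd pair of virtual pages,
                 * so round the range up to whole pages and halve the
                 * count to bound the number of entries affected.
                 */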
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                size = (size + 1) >> 1;
                if (size <= current_cpu_data.tlbsize / 2) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);

                        start &= (PAGE_MASK << 1);
                        end += ((PAGE_SIZE << 1) - 1);
                        end &= (PAGE_MASK << 1);
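
                        /*
                         * start and end are now aligned to an even/odd
                         * page pair, matching the VPN2 granularity the
                         * probe works on; the loop advances two pages
                         * per iteration.
                         */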
                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                BARRIER;
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                EXIT_CRITICAL(flags);
        }
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long flags;
        int size;

        ENTER_CRITICAL(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= current_cpu_data.tlbsize / 2) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        BARRIER;
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        EXIT_CRITICAL(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
                ENTER_CRITICAL(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
                tlb_probe();
                BARRIER;
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(oldpid);
                EXIT_CRITICAL(flags);
        }
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        ENTER_CRITICAL(flags);
        oldpid = read_c0_entryhi();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        BARRIER;
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);

        EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and applies the needed
 * workaround.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle debugger faulting in for debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        ENTER_CRITICAL(flags);

        pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        BARRIER;
        pudp = pud_offset(pgdp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
        ptep = pte_offset_map(pmdp, address);
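
        /*
         * A TLB entry covers an even/odd pair of pages: EntryLo0 takes
         * the even PTE, EntryLo1 the odd one.  In the 32-bit layout the
         * low 6 bits of a software PTE hold software-only flags, with
         * the hardware fields (PFN, C, D, V, G) above them, so the
         * value is shifted down by 6 before it is handed to hardware.
         */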
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
        write_c0_entrylo0(ptep->pte_high);
        ptep++;
        write_c0_entrylo1(ptep->pte_high);
#else
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
        mtc0_tlbw_hazard();
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        tlbw_use_hazard();
        EXIT_CRITICAL(flags);
}

#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
                                       unsigned long address, pte_t pte)
{
        unsigned long flags;
        unsigned int asid;
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx;

        ENTER_CRITICAL(flags);
        address &= (PAGE_MASK << 1);
        asid = read_c0_entryhi() & ASID_MASK;
        write_c0_entryhi(address | asid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        BARRIER;
        pmdp = pmd_offset(pgdp, address);
        idx = read_c0_index();
        ptep = pte_offset_map(pmdp, address);
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
        mtc0_tlbw_hazard();
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        tlbw_use_hazard();
        EXIT_CRITICAL(flags);
}
#endif

void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        unsigned long entryhi, unsigned long pagemask)
{
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
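        /*
         * Bump c0_wired before writing the entry: indices below
         * c0_wired are exempt from random replacement, so the new
         * translation can never be evicted by tlbwr.
         */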
        write_c0_index(wired);
        BARRIER;
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        BARRIER;
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        EXIT_CRITICAL(flags);
}

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

static int temp_tlb_entry __initdata;
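
/*
 * temp_tlb_entry is initialised to the top TLB index in tlb_init() and
 * counts downwards, so temporary entries are handed out from the high
 * end of the TLB while wired entries grow up from index 0; the two
 * collide when --temp_tlb_entry drops below c0_wired.
 */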

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask)
{
        int ret = 0;
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        if (--temp_tlb_entry < wired) {
                printk(KERN_WARNING
                       "No TLB space left for add_temporary_entry\n");
                ret = -ENOSPC;
                goto out;
        }

        write_c0_index(temp_tlb_entry);
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
out:
        EXIT_CRITICAL(flags);
        return ret;
}

static void __init probe_tlb(unsigned long config)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int reg;

        /*
         * If this isn't a MIPS32 / MIPS64 compliant CPU the Config 1
         * register is not supported and we assume R4k style.  CPU
         * probing has already figured out the number of TLB entries.
         */
        if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
                return;
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * If the TLB is shared in an SMTC system, the total size has
         * already been calculated and written into cpu_data tlbsize.
         */
        if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
                return;
#endif /* CONFIG_MIPS_MT_SMTC */

        reg = read_c0_config1();
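        /*
         * Config[MT] (bits 9:7) encodes the MMU type; if the low two
         * bits of that field are clear there is no standard TLB to
         * drive.  Config1[MMUSize] (bits 30:25) holds the number of
         * TLB entries minus one.
         */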
        if (!((config >> 7) & 3))
                panic("No TLB present");

        c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}

static int __initdata ntlb = 0;
static int __init set_ntlb(char *str)
{
        get_option(&str, &ntlb);
        return 1;
}

__setup("ntlb=", set_ntlb);

void __init tlb_init(void)
{
        unsigned int config = read_c0_config();

        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set for 4kb pages.
         */
        probe_tlb(config);
        write_c0_pagemask(PM_DEFAULT_MASK);
        write_c0_wired(0);
        write_c0_framemask(0);
        temp_tlb_entry = current_cpu_data.tlbsize - 1;

        /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();

        /* Did I tell you that ARC SUCKS?  */

        if (ntlb) {
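                /*
                 * "ntlb=" restricts random replacement to the top ntlb
                 * entries: the bottom (tlbsize - ntlb) entries are
                 * marked wired so tlbwr never selects them.
                 */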
                if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
                        int wired = current_cpu_data.tlbsize - ntlb;
                        write_c0_wired(wired);
                        write_c0_index(wired - 1);
                        printk("Restricting TLB to %d entries\n", ntlb);
                } else
                        printk("Ignoring invalid argument ntlb=%d\n", ntlb);
        }

        build_tlb_refill_handler();
}