/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>
extern void build_tlb_refill_handler(void);
/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
				     "nop; nop; nop; nop; nop; nop;\n\t" \
				     ".set reorder\n\t")
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	BARRIER;

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/*
		 * Make sure all entries differ.  If they're not different
		 * MIPS32 will take revenge ...
		 */
		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
		write_c0_index(entry);
		BARRIER;
		tlb_write_indexed();
		entry++;
	}
	BARRIER;
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
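/*
 * Flushing a whole address space does not need to touch the TLB at all:
 * drop_mmu_context() simply hands the mm a fresh ASID, so entries tagged
 * with the old ASID can never match again and are recycled lazily.
 */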
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0)
		drop_mmu_context(mm, cpu);
}
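/*
 * An R4k-style TLB entry maps an even/odd pair of pages (EntryLo0/EntryLo1
 * sharing one VPN2), so the range flushes below work on double-page
 * granularity: the page count is halved and start/end are aligned to
 * PAGE_MASK << 1.  With 4kB pages each probe covers an 8kB aligned region.
 */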
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				BARRIER;
				tlb_probe();
				BARRIER;
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(CKSEG0 +
				                 (idx << (PAGE_SHIFT + 1)));
				BARRIER;
				tlb_write_indexed();
			}
			BARRIER;
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
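/*
 * Same approach as local_flush_tlb_range(), but for kernel mappings.  Those
 * are entered with the global bit set, so tlb_probe() matches them no matter
 * what ASID is in EntryHi; probing with the bare double-page-aligned address
 * is sufficient.
 */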
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			BARRIER;
			tlb_probe();
			BARRIER;
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
			BARRIER;
			tlb_write_indexed();
		}
		BARRIER;
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		BARRIER;
		tlb_probe();
		BARRIER;
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		BARRIER;
		tlb_write_indexed();
		BARRIER;

	finish:
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}
/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	page &= (PAGE_MASK << 1);
	oldpid = read_c0_entryhi();
	write_c0_entryhi(page);
	BARRIER;
	tlb_probe();
	BARRIER;
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		BARRIER;
		tlb_write_indexed();
		BARRIER;
	}
	write_c0_entryhi(oldpid);

	local_irq_restore(flags);
}
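/*
 * __update_tlb() is invoked via update_mmu_cache() once the fault handler
 * has installed a new PTE: it probes for the entry covering the faulting
 * address and rewrites the whole EntryLo0/EntryLo1 pair from the page
 * tables, using an indexed write on a probe hit and a random write on a
 * miss.
 */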
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needy.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & ASID_MASK;

	local_irq_save(flags);
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	BARRIER;
	tlb_probe();
	BARRIER;
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	write_c0_entrylo0(ptep->pte_high);
	ptep++;
	write_c0_entrylo1(ptep->pte_high);
#else
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
	write_c0_entryhi(address | pid);
	BARRIER;
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	BARRIER;
	write_c0_entryhi(pid);
	local_irq_restore(flags);
}
/* Currently unused variant, kept for reference. */
#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
				       unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	local_irq_save(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	BARRIER;
	tlb_probe();
	BARRIER;
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	BARRIER;
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	BARRIER;
	local_irq_restore(flags);
}
#endif
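/*
 * Entries at indices below the CP0 Wired register are never picked by the
 * random replacement of tlbwr, so a mapping installed by add_wired_entry()
 * stays resident until it is removed explicitly.
 */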
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
			    unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	BARRIER;
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	BARRIER;
	tlb_write_indexed();
	BARRIER;

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}
/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */
static int temp_tlb_entry __initdata;
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	BARRIER;
	tlb_write_indexed();
	BARRIER;

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	local_irq_restore(flags);
	return ret;
}
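/*
 * Temporary entries are handed out from the top of the TLB downwards
 * (temp_tlb_entry starts at tlbsize - 1, see tlb_init() below), so they
 * cannot collide with the wired entries growing up from index 0.
 */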
static void __init probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU, the Config1 register
	 * is not supported and we assume an R4k-style TLB.  CPU probing has
	 * already figured out the number of TLB entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;

	reg = read_c0_config1();
	if (!((config >> 7) & 3))
		panic("No TLB present");

	/* Config1 bits 30:25 (MMUSize) hold the number of TLB entries - 1. */
	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}
void __init tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set for 4kb pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;
	local_flush_tlb_all();

	build_tlb_refill_handler();
}