/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

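/*
 * Each TLB entry maps an even/odd pair of pages, so EntryHi holds VPN2:
 * the virtual address with its low PAGE_SHIFT + 1 bits stripped.  Dummy
 * addresses spaced two pages apart in CKSEG0 therefore give every index
 * a distinct VPN2, and CKSEG0 is unmapped so they can never be hit.
 */
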
/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
                                     "nop; nop; nop; nop; nop; nop;\n\t" \
                                     ".set reorder\n\t")

void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = read_c0_wired();

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);
}

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu;

        preempt_disable();

        cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                drop_mmu_context(mm, cpu);
        }

        preempt_enable();
}

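/*
 * Flush a user address range: probe for each even/odd page pair in the
 * range and invalidate any entry that hits.  Once the range covers more
 * than half the TLB it is cheaper to just hand the mm a new ASID.
 */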
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long flags;
                int size;

                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                size = (size + 1) >> 1;
                local_irq_save(flags);
                if (size <= current_cpu_data.tlbsize/2) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);

                        start &= (PAGE_MASK << 1);
                        end += ((PAGE_SIZE << 1) - 1);
                        end &= (PAGE_MASK << 1);
                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                BARRIER;
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                local_irq_restore(flags);
        }
}

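/*
 * Same approach for kernel addresses, minus the ASID handling; ranges
 * larger than half the TLB fall back to a full flush.
 */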
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long flags;
        int size;

        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        local_irq_save(flags);
        if (size <= current_cpu_data.tlbsize / 2) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        BARRIER;
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        local_irq_restore(flags);
}

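/*
 * Flush a single user page: the even and odd page of a pair share one
 * TLB entry, which is why the address is masked with (PAGE_MASK << 1)
 * before probing.
 */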
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
                local_irq_save(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
                tlb_probe();
                BARRIER;
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(oldpid);
                local_irq_restore(flags);
        }
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        local_irq_save(flags);
        oldpid = read_c0_entryhi();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        BARRIER;
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);

        local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the necessary workaround.
 */
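/*
 * __update_tlb() refills the entry covering the faulting address: probe
 * for its VPN2, walk the page tables, load both PTEs of the even/odd
 * page pair into EntryLo0/EntryLo1, then overwrite the matching entry
 * (or write a random one if the probe missed).
 */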
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle debugger faulting in for debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        local_irq_save(flags);

        pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        BARRIER;
        pudp = pud_offset(pgdp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
        ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
        write_c0_entrylo0(ptep->pte_high);
        ptep++;
        write_c0_entrylo1(ptep->pte_high);
#else
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
        mtc0_tlbw_hazard();
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        tlbw_use_hazard();
        local_irq_restore(flags);
}

#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
                                       unsigned long address, pte_t pte)
{
        unsigned long flags;
        unsigned int asid;
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx;

        local_irq_save(flags);
        address &= (PAGE_MASK << 1);
        asid = read_c0_entryhi() & ASID_MASK;
        write_c0_entryhi(address | asid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        BARRIER;
        pmdp = pmd_offset(pgdp, address);
        idx = read_c0_index();
        ptep = pte_offset_map(pmdp, address);
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
        mtc0_tlbw_hazard();
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        tlbw_use_hazard();
        local_irq_restore(flags);
}
#endif

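/*
 * Entries at indices below c0_wired are never replaced by tlbwr, so a
 * mapping installed by add_wired_entry() stays in the TLB until it is
 * explicitly overwritten or flushed.
 */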
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                            unsigned long entryhi, unsigned long pagemask)
{
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        mtc0_tlbw_hazard();
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        local_irq_restore(flags);
}

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

static int temp_tlb_entry __initdata;

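/*
 * Temporary entries are handed out from the top of the TLB downwards,
 * so they only collide with the wired entries at the bottom once the
 * TLB is exhausted.
 */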
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask)
{
        int ret = 0;
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        if (--temp_tlb_entry < wired) {
                printk(KERN_WARNING
                       "No TLB space left for add_temporary_entry\n");
                ret = -ENOSPC;
                goto out;
        }

        write_c0_index(temp_tlb_entry);
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
out:
        local_irq_restore(flags);
        return ret;
}

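/*
 * On MIPS32/MIPS64 CPUs the TLB size is reported by the Config1
 * register: bits 30:25 hold the number of entries minus one.
 */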
static void __init probe_tlb(unsigned long config)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int reg;

        /*
         * If this isn't a MIPS32 / MIPS64 compliant CPU the Config 1
         * register is not supported, so we assume R4k style.  Cpu probing
         * has already figured out the number of tlb entries.
         */
        if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
                return;

        reg = read_c0_config1();
        if (!((config >> 7) & 3))
                panic("No TLB present");

        c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}

void __init tlb_init(void)
{
        unsigned int config = read_c0_config();

        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set for 4kb pages.
         */
        probe_tlb(config);
        write_c0_pagemask(PM_DEFAULT_MASK);
        write_c0_wired(0);
        write_c0_framemask(0);
        temp_tlb_entry = current_cpu_data.tlbsize - 1;

        /* From this point on the ARC firmware is dead. */
        local_flush_tlb_all();

        build_tlb_refill_handler();
}