/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>
extern void build_tlb_refill_handler(void);
/*
 * LOONGSON2/3 has a 4 entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not totally transparent to software.
 */
static inline void flush_itlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_LOONGSON3:
		write_c0_diag(4);
		break;
	default:
		break;
	}
}

static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_itlb();
}
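/*
 * Illustrative sketch, assuming the Loongson behaviour described above: a
 * raw sequence that rewrites a TLB entry must flush the ITLB by hand,
 * because the hardware does not invalidate the ITLB copy for us.  The
 * exported helpers below already follow this pattern; the fragment only
 * spells it out:
 *
 *	write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 *	mtc0_tlbw_hazard();
 *	tlb_write_indexed();		the DTLB entry is now gone ...
 *	tlbw_use_hazard();
 *	flush_itlb();			... but the ITLB copy needs this
 */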
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	if (cpu_has_tlbinv) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	flush_itlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}
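/*
 * Conceptual sketch only, not the real implementation: "bumping the asid"
 * means the mm is simply handed the next free ASID, so its old TLB entries
 * can never match again and nothing has to be probed or written back.
 * Roughly (ignoring the version handling done by the real code):
 *
 *	cpu_context(cpu, mm) = ++asid_cache(cpu);	take the next ASID
 *	if (!(asid_cache(cpu) & ASID_MASK))
 *		local_flush_tlb_all();			ASIDs wrapped: flush
 *
 * The real logic lives in drop_mmu_context()/get_new_mmu_context() in
 * <asm/mmu_context.h>; the lines above are only an illustration.
 */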
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_itlb();
		local_irq_restore(flags);
	}
}
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		flush_itlb_vm(vma);
		local_irq_restore(flags);
	}
}
/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	flush_itlb();
	local_irq_restore(flags);
}
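/*
 * Illustrative use, with a hypothetical address: because the target entry
 * has the global bit set, the probe above matches whatever ASID happens to
 * be live, so a caller only needs the virtual address of the (double-page)
 * slot it wants gone.  FIX_CMAP_BEGIN is used purely as an example of such
 * a globally mapped fixmap slot:
 *
 *	unsigned long vaddr = fix_to_virt(FIX_CMAP_BEGIN);
 *	local_flush_tlb_one(vaddr);
 */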
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;

		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	flush_itlb_vm(vma);
	local_irq_restore(flags);
}
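/*
 * For reference, the generic MM code reaches __update_tlb() through the
 * update_mmu_cache() wrapper.  A minimal sketch of that wrapper (the real
 * definition lives in <asm/pgtable.h>, not in this file):
 *
 *	static inline void update_mmu_cache(struct vm_area_struct *vma,
 *		unsigned long address, pte_t *ptep)
 *	{
 *		pte_t pte = *ptep;
 *		__update_tlb(vma, address, pte);
 *	}
 */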
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	local_irq_save(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	local_irq_restore(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

int temp_tlb_entry __cpuinitdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

out:
	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	local_irq_restore(flags);
	return ret;
}
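/*
 * Illustrative call, values hypothetical: early platform code that needs a
 * short-lived mapping before trap_init() has run could do:
 *
 *	int err;
 *
 *	err = add_temporary_entry(lo0, lo1, vaddr, PM_DEFAULT_MASK);
 *	if (err)
 *		panic("no TLB space for early mapping");
 *
 * Such an entry is not wired, so any later full flush (for instance the
 * local_flush_tlb_all() issued during TLB configuration) removes it again.
 */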
static int ntlb;

static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);
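/*
 * Worked example, numbers only for illustration: booting with "ntlb=16" on
 * a CPU whose TLB has 48 entries makes tlb_init() below wire off the first
 * 48 - 16 = 32 entries, so only 16 remain available to the random
 * replacement used by the TLB refill handler (a debugging aid).
 */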
/*
 * Configure the TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large virtual
		 * address.
		 */
		u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
		pg |= PG_ELPA;
#endif
		write_c0_pagegrain(pg);
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.	 */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);