/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>
extern void build_tlb_refill_handler(void);
/*
 * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
 * unfortunately, itlb is not totally transparent to software.
 */
static inline void flush_itlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_LOONGSON3:
		write_c0_diag(4);
		break;
	default:
		break;
	}
}

static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_itlb();
}
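/*
 * Flush the entire TLB. Wired entries are skipped: the fallback loop below
 * starts at c0_wired. On cores with the TLB invalidate instruction the VTLB
 * and each FTLB set are invalidated wholesale with tlbinvf; otherwise every
 * entry is overwritten with a unique, unmatchable VPN2 (UNIQUE_ENTRYHI) so
 * that no two entries alias each other.
 */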
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	if (cpu_has_tlbinv) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_itlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}
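/*
 * For small ranges it is cheaper to probe and invalidate each double page
 * (the even/odd pair sharing one VPN2) individually; beyond roughly half
 * the TLB (or an eighth of it when an FTLB is present) it is cheaper to
 * retire the whole address space's ASID instead.
 */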
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			htw_stop();
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
			htw_start();
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_itlb();
		local_irq_restore(flags);
	}
}
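/*
 * Kernel mappings are global, so the probe below matches on the VPN2 alone
 * (EntryHi is written with a zero ASID field) regardless of which address
 * space is current; there is no per-mm context to drop, so the fallback
 * for large ranges is a full flush.
 */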
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_itlb();
	local_irq_restore(flags);
}
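/*
 * Invalidate the single double-page entry covering @page, if one is cached
 * for this mm's ASID. A failed probe (index < 0) means the page is not in
 * the TLB and there is nothing to do beyond restoring EntryHi.
 */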
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		htw_stop();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

finish:
		write_c0_entryhi(oldpid);
		htw_start();
		flush_itlb_vm(vma);
		local_irq_restore(flags);
	}
}
/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_itlb();
	local_irq_restore(flags);
}
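/*
 * Refill helper for update_mmu_cache(): after a fault has installed a new
 * pte, preload the matching TLB entry. Each MIPS TLB entry maps an even/odd
 * page pair under one VPN2, so both EntryLo registers are reloaded from the
 * adjacent ptes (or from a single pmd for a huge page).
 */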
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needy.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_itlb_vm(vma);
	local_irq_restore(flags);
}
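/*
 * Install a permanently wired entry mapping the double page described by
 * (entryhi, entrylo0, entrylo1, pagemask), and bump c0_wired so the entry
 * is never victimized by random replacement. A hypothetical example, wiring
 * one pair of pages as a global mapping:
 *
 *	add_wired_entry(ENTRYLO(phys) | ENTRYLO_G, ENTRYLO(phys + PAGE_SIZE),
 *			vaddr & (PAGE_MASK << 1), PM_DEFAULT_MASK);
 *
 * where ENTRYLO() stands for packing a PFN plus cacheability bits; the
 * exact encoding is platform specific and the names above are illustrative
 * only.
 */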
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}
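/*
 * Probe for huge page support by writing PM_HUGE_MASK to c0_pagemask and
 * reading it back: an MMU that cannot represent that page size will not
 * latch the value, so a mismatch on readback means huge pages are
 * unavailable.
 */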
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	local_irq_save(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	local_irq_restore(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */
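/*
 * Temporary entries are handed out from the top of the TLB downwards
 * (temp_tlb_entry is primed to the last index in r4k_tlb_configure());
 * they are not marked wired, so the first full flush reclaims them.
 */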
/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

int temp_tlb_entry;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	htw_start();
	local_irq_restore(flags);
	return ret;
}
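/*
 * "ntlb=N" on the kernel command line limits random replacement to the top
 * N TLB entries by wiring the rest (see tlb_init() below); e.g. booting
 * with "ntlb=32" on a 64-entry TLB wires entries 0-31. A knob for
 * experimenting with a reduced effective TLB size.
 */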
static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);
/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * address.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.	*/
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}
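/*
 * TLB state (wired count, pagemask, the entries themselves) is lost when a
 * core is powered down, so reconfigure from scratch when CPU power
 * management signals that the core is back up (or failed to go down).
 */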
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);