/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1997, 1998, 1999 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000 Kanoj Sarcar (kanoj@sgi.com)
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
extern void build_tlb_refill_handler(void);

#define NTLB_ENTRIES       64
#define NTLB_ENTRIES_HALF  32
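
/*
 * Note on the sizes above: the R10000-class TLB has 64 entries and each
 * entry maps an even/odd pair of virtual pages (one VPN2 tag with two
 * EntryLo values).  The range flushes below therefore count in page pairs
 * and fall back to a full flush or an ASID switch once more than half the
 * TLB would have to be probed entry by entry.
 */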

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	unsigned long entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(CKSEG0);
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Skip the wired entries; start at the first replaceable slot. */
	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < NTLB_ENTRIES) {
		write_c0_index(entry);
		tlb_write_indexed();
		entry++;
	}
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}

void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}
}
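
/*
 * Note that local_flush_tlb_mm() invalidates by ASID only: drop_mmu_context()
 * hands the mm a fresh ASID, so entries tagged with the old one can never
 * match again.  The TLB itself is only blasted when the ASID space wraps
 * inside get_new_mmu_context().
 */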

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;		/* pages -> even/odd page pairs */
		if (size <= NTLB_ENTRIES_HALF) {
			int oldpid = (read_c0_entryhi() & ASID_MASK);
			int newpid = (cpu_context(smp_processor_id(), mm)
				      & ASID_MASK);

			/* Round the range out to whole page pairs (VPN2). */
			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				write_c0_entryhi(CKSEG0);
				if (idx < 0)		/* no matching entry */
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			/* Cheaper to retire the whole ASID. */
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
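
/*
 * Example of the page pair arithmetic above (also used by
 * local_flush_tlb_kernel_range() below), assuming the 4K pages that
 * tlb_init() sets up and a made-up range of 0x2000 .. 0x8000:
 * size = (0x6000 + 0xfff) >> 12 = 6 pages, then (6 + 1) >> 1 = 3 entry
 * pairs.  That is well below NTLB_ENTRIES_HALF, so the loop probes each
 * pair individually instead of dropping the whole context.
 */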

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	local_irq_save(flags);
	if (size <= NTLB_ENTRIES_HALF) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			tlb_probe();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			/* Refill the slot with a unique, never-mapped VPN2. */
			write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
			if (idx < 0)		/* no matching entry */
				continue;
			tlb_write_indexed();
		}
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}
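
/*
 * The write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))) above refills a
 * dead slot with a KSEG0 address that is unique per index.  KSEG0 is never
 * translated through the TLB, and keeping every dummy VPN2 distinct avoids
 * the duplicate-entry machine check that R4000-family parts raise.
 */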

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (cpu_context(smp_processor_id(), vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = (cpu_context(smp_processor_id(), vma->vm_mm) &
			  ASID_MASK);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = (read_c0_entryhi() & ASID_MASK);
		write_c0_entryhi(page | newpid);
		tlb_probe();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		write_c0_entryhi(CKSEG0);
		if (idx < 0)			/* page not in the TLB */
			goto finish;
		tlb_write_indexed();

	finish:
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	page &= (PAGE_MASK << 1);
	oldpid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(page);
	tlb_probe();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write_indexed();
	}
	write_c0_entryhi(oldpid);

	local_irq_restore(flags);
}

/*
 * XXX Simplify this.  On the R10000 writing a TLB entry for a virtual
 * address that already exists will overwrite the old entry and not result
 * in TLB malfunction or TLB shutdown.
 */
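
/*
 * What that simplification could look like (a sketch only, not wired in
 * here): on an R10000-only kernel the tlb_probe()/read_c0_index() dance in
 * __update_tlb() could be dropped and the entry written unconditionally
 * with tlb_write_random(), since overwriting an existing VPN2 is safe on
 * that part.  The probe is kept because other R4000-family CPUs shut the
 * TLB down on duplicate entries.
 */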
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & ASID_MASK;

	if ((pid != (cpu_context(smp_processor_id(), vma->vm_mm) & ASID_MASK))
	    || (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
		printk(KERN_WARNING
		       "%s: Wheee, bogus tlbpid mmpid=%d tlbpid=%d\n",
		       __FUNCTION__, (int) (cpu_context(smp_processor_id(),
		       vma->vm_mm) & ASID_MASK), pid);
	}

	local_irq_save(flags);
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	tlb_probe();
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	/*
	 * The low 6 bits of a pte hold software flags only; shifting them out
	 * lines the G, V, D and cache bits plus the PFN up with the EntryLo
	 * layout.  The even/odd pair of ptes fills EntryLo0/EntryLo1.
	 */
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	write_c0_entryhi(address | pid);
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	write_c0_entryhi(pid);
	local_irq_restore(flags);
}

void __init tlb_init(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set for 4K pages.
	 */
	write_c0_pagemask(PM_4K);
	write_c0_wired(0);
	write_c0_framemask(0);

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */

	build_tlb_refill_handler();
}
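
/*
 * Everything in this file touches only the local CP0 TLB.  On SMP kernels
 * the generic flush_tlb_*() entry points (expected to live in
 * arch/mips/kernel/smp.c) broadcast to these local_*() routines on the
 * other CPUs via smp_call_function().
 */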