arch/mips/mm/tlb-r8k.c (linux/fpc-iii.git, Linux 3.17-rc2)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
extern void build_tlb_refill_handler(void);

#define TFP_TLB_SIZE		384
#define TFP_TLB_SET_SHIFT	7
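
/*
 * The 384-entry TFP TLB is addressed here as three groups of 128
 * entries: write_c0_tlbset() selects the group (entry >>
 * TFP_TLB_SET_SHIFT) and the virtual address selects the slot within
 * it.
 */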

/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
				     "nop; nop; nop; nop; nop; nop;\n\t" \
				     ".set reorder\n\t")
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo(0);

	for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
		write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
		write_c0_vaddr(entry << PAGE_SHIFT);
		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
		mtc0_tlbw_hazard();
		tlb_write();
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
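
/*
 * Flushing a whole address space is done lazily: drop_mmu_context()
 * assigns the mm a new ASID, so entries tagged with the old ASID become
 * unreachable without touching the TLB itself.
 */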
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0)
		drop_mmu_context(mm, cpu);
}
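
/*
 * Flush the user mappings in [start, end) for vma's address space.  A
 * range spanning more pages than the TLB has entries is handled by
 * dropping the mm's context (new ASID); smaller ranges are probed page
 * by page and any matching entry is overwritten with an invalid
 * CKSEG0-based one.
 */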
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid, size;

	if (!cpu_context(cpu, mm))
		return;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	local_irq_save(flags);

	if (size > TFP_TLB_SIZE / 2) {
		drop_mmu_context(mm, cpu);
		goto out_restore;
	}

	oldpid = read_c0_entryhi();
	newpid = cpu_asid(cpu, mm);

	write_c0_entrylo(0);

	start &= PAGE_MASK;
	end += (PAGE_SIZE - 1);
	end &= PAGE_MASK;
	while (start < end) {
		signed long idx;

		write_c0_vaddr(start);
		write_c0_entryhi(start);
		start += PAGE_SIZE;
		tlb_probe();
		idx = read_c0_tlbset();
		if (idx < 0)
			continue;

		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write();
	}
	write_c0_entryhi(oldpid);

out_restore:
	local_irq_restore(flags);
}
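
/*
 * Same probe-and-invalidate scheme, but for kernel mappings; a range
 * spanning more pages than the TLB holds falls back to
 * local_flush_tlb_all().
 */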
/* Usable for KV1 addresses only! */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	if (size > TFP_TLB_SIZE / 2) {
		local_flush_tlb_all();
		return;
	}

	local_irq_save(flags);

	write_c0_entrylo(0);

	start &= PAGE_MASK;
	end += (PAGE_SIZE - 1);
	end &= PAGE_MASK;
	while (start < end) {
		signed long idx;

		write_c0_vaddr(start);
		write_c0_entryhi(start);
		start += PAGE_SIZE;
		tlb_probe();
		idx = read_c0_tlbset();
		if (idx < 0)
			continue;

		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write();
	}

	local_irq_restore(flags);
}
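
/*
 * Flush the mapping of a single page from vma's address space: probe
 * with the vma's ASID and, on a hit, overwrite the entry with an
 * invalid one.
 */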
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid;
	signed long idx;

	if (!cpu_context(cpu, vma->vm_mm))
		return;

	newpid = cpu_asid(cpu, vma->vm_mm);
	page &= PAGE_MASK;
	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	write_c0_vaddr(page);
	write_c0_entryhi(newpid);
	tlb_probe();
	idx = read_c0_tlbset();
	if (idx < 0)
		goto finish;

	write_c0_entrylo(0);
	write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
	tlb_write();

finish:
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);
}
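
/*
 * __update_tlb() is called from update_mmu_cache() once the page tables
 * have been updated; it preloads the new translation so the faulting
 * access does not immediately take another TLB exception.
 */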
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int pid;

	/*
	 * Handle the debugger faulting in pages for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & ASID_MASK;

	local_irq_save(flags);
	address &= PAGE_MASK;
	write_c0_vaddr(address);
	write_c0_entryhi(pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	pmdp = pmd_offset(pgdp, address);
	ptep = pte_offset_map(pmdp, address);
	tlb_probe();
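
	/*
	 * Load the new pte; the shift adapts the Linux pte layout to the
	 * TFP EntryLo register format.
	 */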
	write_c0_entrylo(pte_val(*ptep++) >> 6);
	tlb_write();

	write_c0_entryhi(pid);
	local_irq_restore(flags);
}
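
/*
 * The TFP TLB geometry is fixed, so probe_tlb() just records the entry
 * count and ignores the config value passed in.
 */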
static void probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	c->tlbsize = 3 * 128;		/* 3 sets each 128 entries */
}
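
/*
 * tlb_init() programs the page-size fields of the CP0 Status register
 * (the shifts by 32 and 36 place the page-size code into the ST0_UPS
 * and ST0_KPS fields), clears the wired count, flushes the TLB and
 * installs the TLB refill handler.
 */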
void tlb_init(void)
{
	unsigned int config = read_c0_config();
	unsigned long status;

	probe_tlb(config);

	status = read_c0_status();
	status &= ~(ST0_UPS | ST0_KPS);
#ifdef CONFIG_PAGE_SIZE_4KB
	status |= (TFP_PAGESIZE_4K << 32) | (TFP_PAGESIZE_4K << 36);
#elif defined(CONFIG_PAGE_SIZE_8KB)
	status |= (TFP_PAGESIZE_8K << 32) | (TFP_PAGESIZE_8K << 36);
#elif defined(CONFIG_PAGE_SIZE_16KB)
	status |= (TFP_PAGESIZE_16K << 32) | (TFP_PAGESIZE_16K << 36);
#elif defined(CONFIG_PAGE_SIZE_64KB)
	status |= (TFP_PAGESIZE_64K << 32) | (TFP_PAGESIZE_64K << 36);
#endif
	write_c0_status(status);

	write_c0_wired(0);

	local_flush_tlb_all();

	build_tlb_refill_handler();
}