arch/avr32/mm/tlb.c [linux-ginger.git]
/*
 * AVR32 TLB operations
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>

#include <asm/mmu_context.h>

#define _TLBEHI_I	0x100

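/*
 * Dump a single DTLB entry to the console.  The entry is selected by
 * loading its index into the MMUCR entry-select field (bits 14 and up,
 * the same field this file later uses as the replacement pointer) and
 * issuing "tlbr"; TLBEHI/TLBELO then hold the entry's VPN, ASID, PFN
 * and permission bits.  MMUCR and TLBEHI are restored before returning
 * so the running context is not disturbed.
 */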
void show_dtlb_entry(unsigned int index)
{
	unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
	unsigned long flags;

	local_irq_save(flags);
	mmucr_save = sysreg_read(MMUCR);
	tlbehi_save = sysreg_read(TLBEHI);
	mmucr = mmucr_save & 0x13;
	mmucr |= index << 14;
	sysreg_write(MMUCR, mmucr);

	asm volatile("tlbr" : : : "memory");
	cpu_sync_pipeline();

	tlbehi = sysreg_read(TLBEHI);
	tlbelo = sysreg_read(TLBELO);

	printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
	       index,
	       (tlbehi & 0x200)?'1':'0',
	       (tlbelo & 0x100)?'1':'0',
	       (tlbehi & 0xff),
	       (tlbehi >> 12), (tlbelo >> 12),
	       (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
	       (tlbelo & 0x200)?'1':'0',
	       (tlbelo & 0x080)?'1':'0',
	       (tlbelo & 0x001)?'1':'0',
	       (tlbelo & 0x002)?'1':'0');

	sysreg_write(MMUCR, mmucr_save);
	sysreg_write(TLBEHI, tlbehi_save);
	cpu_sync_pipeline();
	local_irq_restore(flags);
}

void dump_dtlb(void)
{
	unsigned int i;

	printk("ID V G ASID VPN PFN AP SZ C B W D\n");
	for (i = 0; i < 32; i++)
		show_dtlb_entry(i);
}

static unsigned long last_mmucr;

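/*
 * Point the MMUCR entry-select/replacement-pointer field (starting at
 * bit "shift") at the DTLB entry a following "tlbw" should write.
 * "tlbs" searches for the mapping currently loaded into TLBEHI: on a
 * hit the hardware has already selected the matching entry; on a miss
 * (MMUCR N bit set) we pick a not-recently-accessed entry from the
 * TLBARLO accessed bitmap, resetting the bitmap to "all not accessed"
 * once every entry has been used.
 */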
static inline void set_replacement_pointer(unsigned shift)
{
	unsigned long mmucr, mmucr_save;

	mmucr = mmucr_save = sysreg_read(MMUCR);

	/* Does this mapping already exist? */
	__asm__ __volatile__(
		"	tlbs\n"
		"	mfsr	%0, %1"
		: "=r"(mmucr)
		: "i"(SYSREG_MMUCR));

	if (mmucr & SYSREG_BIT(MMUCR_N)) {
		/* Not found -- pick a not-recently-accessed entry */
		unsigned long rp;
		unsigned long tlbar = sysreg_read(TLBARLO);

		rp = 32 - fls(tlbar);
		if (rp == 32) {
			rp = 0;
			sysreg_write(TLBARLO, -1L);
		}

		mmucr &= 0x13;
		mmucr |= (rp << shift);

		sysreg_write(MMUCR, mmucr);
	}

	last_mmucr = mmucr;
}

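/*
 * Load a DTLB entry for (address, asid): TLBEHI receives the VPN,
 * valid bit and ASID, set_replacement_pointer() picks the entry to
 * overwrite, TLBELO receives the hardware part of the PTE, and "tlbw"
 * commits the entry.  Called with interrupts disabled (see
 * update_mmu_cache() below).
 */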
static void update_dtlb(unsigned long address, pte_t pte, unsigned long asid)
{
	unsigned long vpn;

	vpn = (address & MMU_VPN_MASK) | _TLBEHI_VALID | asid;
	sysreg_write(TLBEHI, vpn);
	cpu_sync_pipeline();

	set_replacement_pointer(14);

	sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);

	/* Let's go */
	asm volatile("nop\n\ttlbw" : : : "memory");
	cpu_sync_pipeline();
}

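/*
 * Called by the generic MM code after a PTE has been installed so the
 * architecture can preload the corresponding DTLB entry.  ptrace may
 * end up here for a foreign mm, in which case we do nothing.
 */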
void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
	unsigned long flags;

	/* ptrace may call this routine */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);
	update_dtlb(address, pte, get_asid());
	local_irq_restore(flags);
}

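/*
 * Invalidate the DTLB entry (if any) matching the given ASID and
 * page-aligned address: probe with "tlbs", and on a hit rewrite the
 * entry with the valid bit cleared and flag it as "not accessed" in
 * TLBARLO so the replacement logic reuses it early.  Callers disable
 * interrupts and set up the ASID/page bits first.
 */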
void __flush_tlb_page(unsigned long asid, unsigned long page)
{
	unsigned long mmucr, tlbehi;

	page |= asid;
	sysreg_write(TLBEHI, page);
	cpu_sync_pipeline();
	asm volatile("tlbs");
	mmucr = sysreg_read(MMUCR);

	if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
		unsigned long tlbarlo;
		unsigned long entry;

		/* Clear the "valid" bit */
		tlbehi = sysreg_read(TLBEHI);
		tlbehi &= ~_TLBEHI_VALID;
		sysreg_write(TLBEHI, tlbehi);
		cpu_sync_pipeline();

		/* mark the entry as "not accessed" */
		entry = (mmucr >> 14) & 0x3f;
		tlbarlo = sysreg_read(TLBARLO);
		tlbarlo |= (0x80000000 >> entry);
		sysreg_write(TLBARLO, tlbarlo);

		/* update the entry with valid bit clear */
		asm volatile("tlbw");
		cpu_sync_pipeline();
	}
}

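/*
 * Flush a single user page.  If the page belongs to an mm other than
 * the current one, temporarily switch to that mm's ASID so the probe
 * in __flush_tlb_page() matches the right entry.
 */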
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
		unsigned long flags, asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
		page &= PAGE_MASK;

		local_irq_save(flags);
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}

		__flush_tlb_page(asid, page);

		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		local_irq_restore(flags);
	}
}

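/*
 * Flush a range of user pages.  When the range spans more than a
 * quarter of the DTLB, flushing page by page is assumed to be more
 * expensive than simply dropping the context: mm->context is reset so
 * the mm is given a fresh ASID the next time it is activated.
 */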
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
			mm->context = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
			unsigned long saved_asid = MMU_NO_ASID;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			if (mm != current->mm) {
				saved_asid = get_asid();
				set_asid(asid);
			}

			while (start < end) {
				__flush_tlb_page(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		local_irq_restore(flags);
	}
}

/*
 * TODO: If this is only called for addresses > TASK_SIZE, we can probably
 * skip the ASID stuff and just use the Global bit...
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
		flush_tlb_all();
	} else {
		unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
		unsigned long saved_asid = get_asid();

		start &= PAGE_MASK;
		end += (PAGE_SIZE - 1);
		end &= PAGE_MASK;
		set_asid(asid);
		while (start < end) {
			__flush_tlb_page(asid, start);
			start += PAGE_SIZE;
		}
		set_asid(saved_asid);
	}
	local_irq_restore(flags);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	/* Invalidate all TLB entries of this process by getting a new ASID */
	if (mm->context != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		mm->context = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		local_irq_restore(flags);
	}
}

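/*
 * Flush the entire TLB by setting the invalidate (I) bit in MMUCR.
 */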
void flush_tlb_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	sysreg_write(MMUCR, sysreg_read(MMUCR) | SYSREG_BIT(MMUCR_I));
	local_irq_restore(flags);
}

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>

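/*
 * Export the 32 DTLB entries through /proc/tlb as a seq_file, printing
 * the same fields as dump_dtlb() above.
 */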
static void *tlb_start(struct seq_file *tlb, loff_t *pos)
{
	static unsigned long tlb_index;

	if (*pos >= 32)
		return NULL;

	tlb_index = 0;
	return &tlb_index;
}

static void *tlb_next(struct seq_file *tlb, void *v, loff_t *pos)
{
	unsigned long *index = v;

	if (*index >= 31)
		return NULL;

	++*pos;
	++*index;
	return index;
}

static void tlb_stop(struct seq_file *tlb, void *v)
{

}

static int tlb_show(struct seq_file *tlb, void *v)
{
	unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
	unsigned long flags;
	unsigned long *index = v;

	if (*index == 0)
		seq_puts(tlb, "ID V G ASID VPN PFN AP SZ C B W D\n");

	BUG_ON(*index >= 32);

	local_irq_save(flags);
	mmucr_save = sysreg_read(MMUCR);
	tlbehi_save = sysreg_read(TLBEHI);
	mmucr = mmucr_save & 0x13;
	mmucr |= *index << 14;
	sysreg_write(MMUCR, mmucr);

	asm volatile("tlbr" : : : "memory");
	cpu_sync_pipeline();

	tlbehi = sysreg_read(TLBEHI);
	tlbelo = sysreg_read(TLBELO);

	sysreg_write(MMUCR, mmucr_save);
	sysreg_write(TLBEHI, tlbehi_save);
	cpu_sync_pipeline();
	local_irq_restore(flags);

	seq_printf(tlb, "%2lu: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
		   *index,
		   (tlbehi & 0x200)?'1':'0',
		   (tlbelo & 0x100)?'1':'0',
		   (tlbehi & 0xff),
		   (tlbehi >> 12), (tlbelo >> 12),
		   (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
		   (tlbelo & 0x200)?'1':'0',
		   (tlbelo & 0x080)?'1':'0',
		   (tlbelo & 0x001)?'1':'0',
		   (tlbelo & 0x002)?'1':'0');

	return 0;
}

static const struct seq_operations tlb_ops = {
	.start		= tlb_start,
	.next		= tlb_next,
	.stop		= tlb_stop,
	.show		= tlb_show,
};

static int tlb_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &tlb_ops);
}

static const struct file_operations proc_tlb_operations = {
	.open		= tlb_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proctlb_init(void)
{
	proc_create("tlb", 0, NULL, &proc_tlb_operations);
	return 0;
}
late_initcall(proctlb_init);
#endif /* CONFIG_PROC_FS */