/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
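
/*
 * The flush-all helpers below walk every way and index of the
 * auto-refill (ARF) ITLB/DTLB.  The entry argument handed to the
 * invalidate primitives encodes the way in the low bits and the index
 * shifted up by PAGE_SHIFT.  The *_no_isync variants batch the
 * invalidations; a single isync after each loop makes them take effect.
 */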

static inline void __flush_itlb_all (void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all (void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}
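
/* Invalidate every auto-refill entry in both the instruction and data TLBs. */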
void flush_tlb_all (void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus,
 * invalidating all previous tlb entries. If mm is someone else's user mapping,
 * we invalidate the context, thus, when that user mapping is swapped in,
 * a new context will be assigned to it.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm) {
		unsigned long flags;
		/* Disable interrupts while the context is swapped out from
		 * under us (local_save_flags alone would not mask them). */
		local_irq_save(flags);
		__get_new_mmu_context(mm);
		__load_mmu_context(mm);
		local_irq_restore(flags);
	}
	else
		mm->context = 0;
}
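
/*
 * Size of each auto-refill TLB: ways times entries per way.
 * _TLB_ENTRIES is the larger of the two.
 */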
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif
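
/*
 * If the range spans no more pages than the TLB has entries, invalidate
 * the pages one by one under the target ASID; for anything larger it is
 * cheaper to drop the whole context via flush_tlb_mm().
 */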
void flush_tlb_range (struct vm_area_struct *vma,
		      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context == NO_CONTEXT)
		return;

#if 0
	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
			(unsigned long)mm->context, start, end);
#endif
	local_irq_save(flags);

	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();
		set_rasid_register(ASID_INSERT(mm->context));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}
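
/*
 * Invalidate the mapping of a single page from both TLBs (the ITLB only
 * for executable mappings).  The RASID register is switched to the
 * page's ASID so the probe-based invalidate hits the right context,
 * then restored.  A typical call site (not from this file) is the
 * generic fault path:
 *
 *	flush_tlb_page(vma, address);
 */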
void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	/* Select the target mm's ASID; without this, the invalidate would
	 * probe under whatever context happens to be current. */
	set_rasid_register(ASID_INSERT(mm->context));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}