vmalloc: walk vmap_areas by sorted list instead of rb_next()
[linux/fpc-iii.git] / arch / xtensa / include / asm / tlbflush.h
blob46d240074f747aa622711b35d0b130cedba6b812
1 /*
2 * include/asm-xtensa/tlbflush.h
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 */
11 #ifndef _XTENSA_TLBFLUSH_H
12 #define _XTENSA_TLBFLUSH_H
14 #ifdef __KERNEL__
16 #include <linux/stringify.h>
17 #include <asm/processor.h>
/* DTLB way that holds the wired page-directory mapping; presumably ways
 * PGD..PGD+2 are all reserved for it (see invalidate_page_directory()). */
#define DTLB_WAY_PGD 7

/* Number of auto-refill (ARF) ways in the instruction and data TLBs. */
#define ITLB_ARF_WAYS 4
#define DTLB_ARF_WAYS 4

/* Bit set in a p{i,d}tlb probe result when the lookup hit a valid entry. */
#define ITLB_HIT_BIT 3
#define DTLB_HIT_BIT 4
27 #ifndef __ASSEMBLY__
/* TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes a single page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct*);
extern void flush_tlb_page(struct vm_area_struct*,unsigned long);
extern void flush_tlb_range(struct vm_area_struct*,unsigned long,unsigned long);

/* Kernel-range flushes fall back to flushing the whole TLB. */
#define flush_tlb_kernel_range(start,end) flush_tlb_all()
44 /* TLB operations. */
/* Probe the ITLB for @addr.  Returns the raw 'pitlb' result; on a hit,
 * bit ITLB_HIT_BIT is set and the value identifies the matching entry. */
static inline unsigned long itlb_probe(unsigned long addr)
{
	unsigned long entry;

	__asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (entry) : "a" (addr));
	return entry;
}
/* Probe the DTLB for @addr.  Returns the raw 'pdtlb' result; on a hit,
 * bit DTLB_HIT_BIT is set and the value identifies the matching entry. */
static inline unsigned long dtlb_probe(unsigned long addr)
{
	unsigned long entry;

	__asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (entry) : "a" (addr));
	return entry;
}
/* Invalidate the ITLB entry identified by @probe (an itlb_probe() result);
 * the trailing 'isync' orders the invalidation against instruction fetch. */
static inline void invalidate_itlb_entry (unsigned long probe)
{
	__asm__ __volatile__("iitlb %0; isync\n\t" : : "a" (probe));
}
/* Invalidate the DTLB entry identified by @probe (a dtlb_probe() result);
 * the trailing 'dsync' orders the invalidation against data accesses. */
static inline void invalidate_dtlb_entry (unsigned long probe)
{
	__asm__ __volatile__("idtlb %0; dsync\n\t" : : "a" (probe));
}
/* Use the .._no_isync functions with caution. Generally, these are
 * handy for bulk invalidates followed by a single 'isync'. The
 * caller must follow up with an 'isync', which can be relatively
 * expensive on some Xtensa implementations.
 */

static inline void invalidate_itlb_entry_no_isync (unsigned entry)
{
	/* Caller must follow up with 'isync'. */
	__asm__ __volatile__ ("iitlb %0\n" : : "a" (entry) );
}
static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
{
	/* Caller must follow up with 'isync'.
	 * NOTE(review): the synchronous variant pairs 'idtlb' with 'dsync';
	 * confirm whether 'isync' or 'dsync' is the intended follow-up here. */
	__asm__ __volatile__ ("idtlb %0\n" : : "a" (entry) );
}
/* Write @val to the ITLBCFG special register, followed by 'isync'. */
static inline void set_itlbcfg_register (unsigned long val)
{
	__asm__ __volatile__("wsr %0, "__stringify(ITLBCFG)"\n\t" "isync\n\t"
			: : "a" (val));
}
/* Write @val to the DTLBCFG special register, followed by 'dsync'. */
static inline void set_dtlbcfg_register (unsigned long val)
{
	__asm__ __volatile__("wsr %0, "__stringify(DTLBCFG)"; dsync\n\t"
			: : "a" (val));
}
/* Write @val to the PTEVADDR special register, followed by 'isync'. */
static inline void set_ptevaddr_register (unsigned long val)
{
	__asm__ __volatile__(" wsr %0, "__stringify(PTEVADDR)"; isync\n"
			: : "a" (val));
}
105 static inline unsigned long read_ptevaddr_register (void)
107 unsigned long tmp;
108 __asm__ __volatile__("rsr %0, "__stringify(PTEVADDR)"\n\t" : "=a" (tmp));
109 return tmp;
/* Install PTE @entry into DTLB way @way, followed by 'dsync'.
 * Note the asm operand order: %0 is the way, %1 is the PTE value. */
static inline void write_dtlb_entry (pte_t entry, int way)
{
	__asm__ __volatile__("wdtlb %1, %0; dsync\n\t"
			: : "r" (way), "r" (entry) );
}
/* Install PTE @entry into ITLB way @way, followed by 'isync'.
 * Note the asm operand order: %0 is the way, %1 is the PTE value. */
static inline void write_itlb_entry (pte_t entry, int way)
{
	__asm__ __volatile__("witlb %1, %0; isync\n\t"
			: : "r" (way), "r" (entry) );
}
124 static inline void invalidate_page_directory (void)
126 invalidate_dtlb_entry (DTLB_WAY_PGD);
127 invalidate_dtlb_entry (DTLB_WAY_PGD+1);
128 invalidate_dtlb_entry (DTLB_WAY_PGD+2);
131 static inline void invalidate_itlb_mapping (unsigned address)
133 unsigned long tlb_entry;
134 if (((tlb_entry = itlb_probe(address)) & (1 << ITLB_HIT_BIT)) != 0)
135 invalidate_itlb_entry(tlb_entry);
138 static inline void invalidate_dtlb_mapping (unsigned address)
140 unsigned long tlb_entry;
141 if (((tlb_entry = dtlb_probe(address)) & (1 << DTLB_HIT_BIT)) != 0)
142 invalidate_dtlb_entry(tlb_entry);
145 #define check_pgt_cache() do { } while (0)
/*
 * DO NOT USE THESE FUNCTIONS.  These instructions aren't part of the Xtensa
 * ISA and exist only for test purposes.
 * You may find them helpful for MMU debugging, however.
 *
 * 'at' is the unmodified input register
 * 'as' is the output register, as follows (specific to the Linux config):
 *
 *	as[31..12] contain the virtual address
 *	as[11..08] are meaningless
 *	as[07..00] contain the asid
 */
/* Debug-only: read the virtual-address half of DTLB way @way via 'rdtlb0'.
 * @way is passed "+a", so the register holding it may be modified. */
static inline unsigned long read_dtlb_virtual (int way)
{
	unsigned long result;

	__asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (result), "+a" (way));
	return result;
}
/* Debug-only: read the translation half of DTLB way @way via 'rdtlb1'.
 * @way is passed "+a", so the register holding it may be modified. */
static inline unsigned long read_dtlb_translation (int way)
{
	unsigned long tmp;

	__asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
	return tmp;
}
/* Debug-only: read the virtual-address half of ITLB way @way via 'ritlb0'.
 * @way is passed "+a", so the register holding it may be modified. */
static inline unsigned long read_itlb_virtual (int way)
{
	unsigned long result;

	__asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (result), "+a" (way));
	return result;
}
/* Debug-only: read the translation half of ITLB way @way via 'ritlb1'.
 * @way is passed "+a", so the register holding it may be modified. */
static inline unsigned long read_itlb_translation (int way)
{
	unsigned long tmp;

	__asm__ __volatile__("ritlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
	return tmp;
}
189 #endif /* __ASSEMBLY__ */
190 #endif /* __KERNEL__ */
191 #endif /* _XTENSA_TLBFLUSH_H */