/*
 * include/asm-xtensa/cacheflush.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * specials for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);

#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p, s)		do { } while (0)
# define __flush_dcache_page(p)			do { } while (0)
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p, s)	__invalidate_dcache_range(p, s)
#endif
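
/*
 * Illustrative use only (not part of the original header): a driver that
 * fills a buffer `buf` of `len` bytes through a cached kernel mapping
 * might write it back before a device reads it, and discard stale lines
 * before reading data a device has written:
 *
 *	__flush_dcache_range((unsigned long) buf, len);
 *	__invalidate_dcache_range((unsigned long) buf, len);
 */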

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
#endif

#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif

/*
 * We have physically tagged caches - nothing to do here -
 * unless we have cache aliasing.
 *
 * Pages can get remapped. Because this might change the 'color' of that page,
 * we have to flush the cache before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */
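
/*
 * Illustration (a sketch, not from the original file): with
 * DCACHE_WAY_SIZE > PAGE_SIZE the cache index is taken from virtual-address
 * bits above PAGE_SHIFT, so two virtual mappings of the same physical page
 * can occupy different cache lines. A common way to express the "color"
 * of a mapping is:
 *
 *	color = (vaddr & (DCACHE_WAY_SIZE - 1)) >> PAGE_SHIFT;
 *
 * Only mappings with the same color index the same cache lines; a remap
 * that changes the color must be preceded by a flush.
 */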

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

#define flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start, end)	flush_cache_all()
#define flush_cache_vunmap(start, end)	flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
extern void flush_dcache_page(struct page *);
extern void flush_cache_range(struct vm_area_struct *, ulong, ulong);
extern void flush_cache_page(struct vm_area_struct *,
			     unsigned long, unsigned long);

#else

#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)

#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
#define flush_dcache_page(page)			do { } while (0)

#define flush_cache_page(vma, addr, pfn)	do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)

#endif

/* Ensure consistency between data and instruction cache. */
#define flush_icache_range(start, end)					\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start, (end) - (start));	\
	} while (0)
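
/*
 * Illustrative use only: after writing instructions through the data
 * cache (e.g. when installing a trampoline; `dst`, `insns` and `len`
 * are hypothetical), make them visible to the instruction fetch path:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long) dst, (unsigned long) dst + len);
 */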

/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma, page)		do { } while (0)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct *, struct page *,
			      unsigned long, void *, const void *,
			      unsigned long);
extern void copy_from_user_page(struct vm_area_struct *, struct page *,
				unsigned long, void *, const void *,
				unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	memcpy(dst, src, len)

#endif
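
/*
 * Note (informational): copy_to_user_page() is called by generic code
 * such as access_process_vm()/ptrace to poke data, possibly instructions,
 * into another process; that is why even the non-aliasing variant writes
 * back the D-cache and invalidates the I-cache after the memcpy.
 */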

#define XTENSA_CACHEBLK_LOG2	29
#define XTENSA_CACHEBLK_SIZE	(1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK	(7 << XTENSA_CACHEBLK_LOG2)
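
/*
 * Background (informational): these constants split the 4 GiB address
 * space into 8 blocks of 512 MiB (1 << 29). On configurations with the
 * CACHEATTR register, that register holds one 4-bit cache-attribute
 * nibble per 512 MiB block, block 0 in the least significant nibble.
 */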

#if XCHAL_HAVE_CACHEATTR
static inline u32 xtensa_get_cacheattr(void)
{
	u32 r;
	asm volatile(" rsr %0, CACHEATTR" : "=a"(r));
	return r;
}

static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r = addr & XTENSA_CACHEBLK_MASK;
	/* shift the block's 4-bit attribute nibble down into bits 3..0 */
	return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
			& 0xF);
}
#else
static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r;
	asm volatile(" rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
	asm volatile(" dsync");
	return r;
}

static inline u32 xtensa_get_cacheattr(void)
{
	/*
	 * Reconstruct a CACHEATTR-style value by probing one address in
	 * each 512 MiB block, walking down from the top of the address
	 * space until the counter wraps back to zero.
	 */
	u32 r = 0;
	u32 a = 0;
	do {
		a -= XTENSA_CACHEBLK_SIZE;
		r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
	} while (a);
	return r;
}
#endif

static inline int xtensa_need_flush_dma_source(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}

static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}
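
/*
 * Informational: the helpers above mask out the cache-attribute (CA)
 * field of the mapping and use it to decide whether the CPU cache must
 * be written back before a device reads a buffer (source) or invalidated
 * before the CPU reads what a device wrote (destination).
 */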

static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		/* number of cache lines touched by [addr, addr + size) */
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile(" dhwb %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile(" dsync");
	}
}
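
/*
 * Worked example (assuming a 32-byte line): addr ending in 0x1c with
 * size = 8 touches bytes 0x1c..0x23, i.e. two lines, and indeed
 * cnt = (8 + 28 + 31) / 32 = 2.
 */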

static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
	int cnt;
	if (size) {
		/* write back and invalidate the first (partial) line */
		asm volatile(" dhwbi %0, 0 ;" : : "a"(addr));
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		/* invalidate the fully covered interior lines */
		while (cnt-- > 0) {
			asm volatile(" dhi %0, %1" : : "a"(addr),
					"n"(XCHAL_DCACHE_LINESIZE));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		/* write back and invalidate the last (partial) line */
		asm volatile(" dhwbi %0, %1" : : "a"(addr),
				"n"(XCHAL_DCACHE_LINESIZE));
		asm volatile(" dsync");
	}
}
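
/*
 * Design note: the first and last lines may share bytes with data outside
 * the buffer, so they are written back before invalidation (dhwbi), while
 * the fully covered interior lines can be dropped outright (dhi).
 */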

static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			/* write back and invalidate one line at a time */
			asm volatile(" dhwbi %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile(" dsync");
	}
}
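
/*
 * Illustrative DMA sequence (a sketch; `buf`, `len` and the driver call
 * are hypothetical) built from the helpers above, for an arbitrarily
 * aligned buffer a device is about to read:
 *
 *	if (xtensa_need_flush_dma_source((u32) buf))
 *		flush_dcache_unaligned((u32) buf, len);
 *	start_device_read(buf, len);	// hypothetical driver call
 */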

#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */