/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * specials for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */

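/*
 * Illustrative use of the low-level routines (a sketch only; the buffer
 * names are made up): to hand freshly written data to a device,
 *
 *	__flush_dcache_range((unsigned long)buf, len);
 *
 * and to safely execute freshly written code, write the data cache back
 * first, then discard any stale instruction-cache contents:
 *
 *	__flush_dcache_range((unsigned long)code, len);
 *	__invalidate_icache_range((unsigned long)code, len);
 */
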
extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);

#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p, s)		do { } while (0)
# define __flush_dcache_page(p)			do { } while (0)
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p, s)	__invalidate_dcache_range(p, s)
#endif

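/*
 * Rationale: a non-writeback data cache never holds dirty lines, so plain
 * flushes degenerate to no-ops and flush+invalidate reduces to a pure
 * invalidate.
 */
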
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
static inline void __invalidate_dcache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif

#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif

/*
 * We have physically tagged caches - nothing to do here -
 * unless we have cache aliasing.
 *
 * Pages can get remapped. Because this might change the 'color' of that page,
 * we have to flush the cache before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */

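/*
 * For illustration (an assumed layout, not something this header defines):
 * with a 16 KiB DCACHE_WAY_SIZE and 4 KiB pages, a page's 'color' would be
 *
 *	color = (vaddr / PAGE_SIZE) % (DCACHE_WAY_SIZE / PAGE_SIZE);
 *
 * i.e. bits [13:12] of the virtual address. Two virtual mappings of the
 * same physical page can alias in the cache when their colors differ.
 */
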
#if defined(CONFIG_MMU) && \
	((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))

#ifdef CONFIG_SMP
void flush_cache_all(void);
void flush_cache_range(struct vm_area_struct *, ulong, ulong);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct *,
		unsigned long, unsigned long);
#else
#define flush_cache_all local_flush_cache_all
#define flush_cache_range local_flush_cache_range
#define flush_icache_range local_flush_icache_range
#define flush_cache_page local_flush_cache_page
#endif

#define local_flush_cache_all()					\
	do {							\
		__flush_invalidate_dcache_all();		\
		__invalidate_icache_all();			\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

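/*
 * Note: a vmap'ed range may map pages at any color, which is presumably
 * why the two macros above fall back to flushing everything.
 */
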
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
extern void flush_dcache_page(struct page *);

void local_flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
void local_flush_cache_page(struct vm_area_struct *vma,
		unsigned long address, unsigned long pfn);

#else

#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)

#define flush_cache_vmap(start,end)			do { } while (0)
#define flush_cache_vunmap(start,end)			do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
#define flush_dcache_page(page)				do { } while (0)

#define flush_icache_range local_flush_icache_range
#define flush_cache_page(vma, addr, pfn)		do { } while (0)
#define flush_cache_range(vma, start, end)		do { } while (0)

#endif

/* Ensure consistency between data and instruction cache. */
#define local_flush_icache_range(start, end)				\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start, (end) - (start));	\
	} while (0)

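/*
 * Typical caller pattern (hypothetical, for illustration): after writing
 * instructions into memory, make the range coherent before executing it:
 *
 *	memcpy(dst, insn_buf, insn_len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + insn_len);
 */
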
/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page)		do { } while (0)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct *, struct page *,
		unsigned long, void *, const void *, unsigned long);
extern void copy_from_user_page(struct vm_area_struct *, struct page *,
		unsigned long, void *, const void *, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	memcpy(dst, src, len)

#endif

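/*
 * Note the asymmetry: only the write direction needs cache maintenance.
 * copy_from_user_page() can be a plain memcpy here because in this
 * non-aliasing configuration the caches are physically tagged, so the
 * kernel read hits the very lines that hold the user's data.
 */
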
#define XTENSA_CACHEBLK_LOG2	29
#define XTENSA_CACHEBLK_SIZE	(1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK	(7 << XTENSA_CACHEBLK_LOG2)

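/*
 * Worked out: 1 << 29 is a 512 MiB block, so the 4 GiB address space holds
 * eight such blocks and the block index is the top three address bits,
 * which is exactly what (7 << 29) masks out. CACHEATTR carries one 4-bit
 * attribute nibble per block: 8 x 4 = 32 bits.
 */
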
#if XCHAL_HAVE_CACHEATTR
static inline u32 xtensa_get_cacheattr(void)
{
	u32 r;

	/* The whole attribute map lives in the CACHEATTR special register. */
	asm volatile(" rsr %0, cacheattr" : "=a"(r));
	return r;
}

static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r = addr & XTENSA_CACHEBLK_MASK;

	/* Emulate rdtlb1: block base in the high bits, that block's 4-bit
	 * cache attribute nibble in the low bits. */
	return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2 - 2)))
			& 0xF);
}
#else

static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r;

	asm volatile(" rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
	asm volatile(" dsync");
	return r;
}

static inline u32 xtensa_get_cacheattr(void)
{
	u32 r = 0;
	u32 a = 0;

	/* Walk all eight 512 MiB blocks, from the top of the address space
	 * down, assembling their attribute nibbles into one 32-bit value. */
	do {
		a -= XTENSA_CACHEBLK_SIZE;
		r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
	} while (a);
	return r;
}
#endif

static inline int xtensa_need_flush_dma_source(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}

static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}

static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;

	if (size) {
		/* Round the byte count up to whole cache lines, accounting
		 * for the misaligned start. */
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			/* dhwb: write back one data-cache line on hit. */
			asm volatile(" dhwb %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile(" dsync");
	}
}

static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
	int cnt;

	if (size) {
		/* First (possibly partial) line: write back and invalidate
		 * so that bystander bytes sharing the line are not lost. */
		asm volatile(" dhwbi %0, 0 ;" : : "a"(addr));
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		/* Fully covered interior lines can be dropped outright. */
		while (cnt-- > 0) {
			asm volatile(" dhi %0, %1" : : "a"(addr),
					"n"(XCHAL_DCACHE_LINESIZE));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		/* Last (possibly partial) line: write back and invalidate. */
		asm volatile(" dhwbi %0, %1" : : "a"(addr),
				"n"(XCHAL_DCACHE_LINESIZE));
		asm volatile(" dsync");
	}
}

static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;

	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			/* dhwbi: write back and invalidate one line on hit. */
			asm volatile(" dhwbi %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile(" dsync");
	}
}

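/*
 * Illustrative DMA pattern built from the helpers above (a sketch; actual
 * callers live elsewhere in the tree): before a device reads a buffer,
 * flush it if its cache attribute says it may hold dirty lines; after a
 * device has written a buffer, invalidate it unless the mapping bypasses
 * the cache:
 *
 *	if (xtensa_need_flush_dma_source(addr))
 *		flush_dcache_unaligned(addr, size);
 *	... device performs DMA ...
 *	if (xtensa_need_invalidate_dma_destination(addr))
 *		invalidate_dcache_unaligned(addr, size);
 */
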
#endif /* _XTENSA_CACHEFLUSH_H */