/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * specials for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */
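
/*
 * Usage sketch (illustrative only, not part of this header): the *_range()
 * variants take a start address and a size in bytes, not an end address.
 * A hypothetical caller that has just written instructions into 'buf'
 * and wants them visible to the icache would do:
 *
 *	__flush_dcache_range((unsigned long) buf, len);
 *	__invalidate_icache_range((unsigned long) buf, len);
 *
 * 'buf' and 'len' are stand-in names, not identifiers used by this file.
 */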

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);

#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
static inline void __flush_dcache_page(unsigned long va)
{
}
static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
{
}
# define __flush_invalidate_dcache_all()	__invalidate_dcache_all()
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
#endif
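
/*
 * Design note: when the dcache is write-through (XCHAL_DCACHE_IS_WRITEBACK
 * is 0), memory is always up to date, so the flush operations above
 * degenerate into plain invalidates or empty stubs.
 */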

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
static inline void __invalidate_dcache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif

#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif

/*
 * We have physically tagged caches - nothing to do here -
 * unless we have cache aliasing.
 *
 * Pages can get remapped. Because this might change the 'color' of that page,
 * we have to flush the cache before the PTE is changed.
 * (see also Documentation/core-api/cachetlb.rst)
 */
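
/*
 * Worked example (assumed sizes, for illustration only): with a
 * direct-mapped 16 KiB dcache way and 4 KiB pages, index bits [13:12]
 * come from the virtual address, giving four possible page colors. Two
 * virtual mappings of the same physical page that differ in those bits
 * hit different cache lines, so a flush through one mapping can leave
 * stale data reachable through the other. The *_page_alias() helpers
 * take both addresses so the flush can be performed through a mapping
 * of the right color:
 *
 *	__flush_invalidate_dcache_page_alias(vaddr, paddr);
 */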

#if defined(CONFIG_MMU) && \
	((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))

#ifdef CONFIG_SMP
void flush_cache_all(void);
void flush_cache_range(struct vm_area_struct *, ulong, ulong);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct *,
		unsigned long, unsigned long);
#else
#define flush_cache_all local_flush_cache_all
#define flush_cache_range local_flush_cache_range
#define flush_icache_range local_flush_icache_range
#define flush_cache_page  local_flush_cache_page
#endif
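
/*
 * Note: under CONFIG_SMP each core has its own caches, so the flush_*
 * entry points are real functions that propagate the operation to all
 * CPUs, while the local_* variants act only on the calling CPU. On UP
 * the two are one and the same.
 */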

#define local_flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

void local_flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
void local_flush_cache_page(struct vm_area_struct *vma,
		unsigned long address, unsigned long pfn);

#else

#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)

#define flush_cache_vmap(start,end)			do { } while (0)
#define flush_cache_vunmap(start,end)			do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
#define flush_dcache_page(page)				do { } while (0)

#define flush_icache_range local_flush_icache_range
#define flush_cache_page(vma, addr, pfn)		do { } while (0)
#define flush_cache_range(vma, start, end)		do { } while (0)

#endif

/* Ensure consistency between data and instruction cache. */
#define local_flush_icache_range(start, end)				\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start, (end) - (start));	\
	} while (0)
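
/*
 * Typical use (sketch): after patching code in memory, push the new
 * instructions out of the dcache and discard any stale icache lines
 * before executing them. 'addr', 'insn' and 'len' are hypothetical:
 *
 *	memcpy(addr, insn, len);
 *	flush_icache_range((unsigned long) addr, (unsigned long) addr + len);
 */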

/* This is not required, see Documentation/core-api/cachetlb.rst */
#define flush_icache_page(vma,page)			do { } while (0)

#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct *, struct page *,
		unsigned long, void *, const void *, unsigned long);
extern void copy_from_user_page(struct vm_area_struct *, struct page *,
		unsigned long, void *, const void *, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	memcpy(dst, src, len)

#endif
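
/*
 * Note: copy_to_user_page() is used e.g. by ptrace to poke instructions
 * (breakpoints) into another task's pages, so even this non-aliasing
 * fallback must write the data back and invalidate the icache;
 * copy_from_user_page() only reads, so a plain memcpy() suffices.
 */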

#endif	/* _XTENSA_CACHEFLUSH_H */