/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_CACHEFLUSH_H
#define _M68K_CACHEFLUSH_H

#include <linux/mm.h>
#ifdef CONFIG_COLDFIRE
#include <asm/mcfsim.h>
#endif

/* cache code */
#define FLUSH_I_AND_D	(0x00000808)
#define FLUSH_I		(0x00000008)

#ifndef ICACHE_MAX_ADDR
#define ICACHE_MAX_ADDR	0
#define ICACHE_SET_MASK	0
#define DCACHE_MAX_ADDR	0
#define DCACHE_SETMASK	0
#endif
#ifndef CACHE_MODE
#define CACHE_MODE	0
#define CACR_ICINVA	0
#define CACR_DCINVA	0
#define CACR_BCINVA	0
#endif

/*
 * The ColdFire architecture has no way to clear individual cache lines, so
 * we are stuck invalidating all of the cache entries when we want a clear
 * operation.
 */
static inline void clear_cf_icache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec	%0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA));
}

static inline void clear_cf_dcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec	%0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_DCINVA));
}

static inline void clear_cf_bcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec	%0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA | CACR_DCINVA));
}
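
/*
 * Illustrative sketch, not part of the original header: since the
 * clear_cf_*() helpers above invalidate through the CACR, the start/end
 * arguments are ignored and the whole cache is always dropped.  A caller
 * wanting just one buffer invalidated still writes, e.g.:
 *
 *	clear_cf_dcache(0, 0);	// arguments ignored; entire dcache goes
 */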

/*
 * Use the ColdFire cpushl instruction to push (and invalidate) cache lines.
 * The start and end addresses are cache line numbers, not memory addresses.
 */
static inline void flush_cf_icache(unsigned long start, unsigned long end)
{
	unsigned long set;

	/*
	 * Each asm block pushes four consecutive cache lines, bumping the
	 * address register by one between pushes; the loop stride of
	 * 0x10 - 3 allows for the three bumps already done inside the asm.
	 */
	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_dcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_bcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

/*
 * Cache handling functions
 */

static inline void flush_icache(void)
{
	if (CPU_IS_COLDFIRE) {
		flush_cf_icache(0, ICACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		asm volatile (	"nop\n"
			"	.chip	68040\n"
			"	cpusha	%bc\n"
			"	.chip	68k");
	} else {
		unsigned long tmp;
		asm volatile (	"movec	%%cacr,%0\n"
			"	or.w	%1,%0\n"
			"	movec	%0,%%cacr"
			: "=&d" (tmp)
			: "id" (FLUSH_I));
	}
}
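
/*
 * Illustrative sketch, not part of the original header: flush_icache() is
 * the whole-icache hammer for "instructions were just written to memory"
 * (write_trampoline() is hypothetical):
 *
 *	write_trampoline(dst, src, len);	// stores land in the dcache
 *	flush_icache();				// drop any stale instructions
 */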

/*
 * Invalidate the cache for the specified memory range.
 * The range starts at the given physical address and
 * extends for len bytes.
 */
extern void cache_clear(unsigned long paddr, int len);
/*
 * Push any dirty cache lines in the specified memory range.
 * The range starts at the given physical address and
 * extends for len bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * Push and invalidate pages in the specified user virtual
 * address range.
 */
extern void cache_push_v(unsigned long vaddr, int len);
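
/*
 * Illustrative sketch, not part of the original header: cache_push() and
 * cache_clear() are the usual bracket around DMA on a physical buffer
 * (the start_dma()/dma_done() calls are hypothetical):
 *
 *	cache_push(buf_paddr, buf_len);		// write dirty lines to RAM
 *	start_dma(buf_paddr, buf_len);		// device reads/writes RAM
 *	dma_done();
 *	cache_clear(buf_paddr, buf_len);	// drop now-stale cached copies
 */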

/* This is needed whenever the virtual mapping of the current
   process changes.  */
#define __flush_cache_all()					\
({								\
	if (CPU_IS_COLDFIRE) {					\
		flush_cf_dcache(0, DCACHE_MAX_ADDR);		\
	} else if (CPU_IS_040_OR_060) {				\
		__asm__ __volatile__("nop\n\t"			\
				     ".chip 68040\n\t"		\
				     "cpusha %dc\n\t"		\
				     ".chip 68k");		\
	} else {						\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define __flush_cache_030()					\
({								\
	if (CPU_IS_020_OR_030) {				\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define flush_cache_all() __flush_cache_all()

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_cache_030();
}

#define flush_cache_dup_mm(mm)			flush_cache_mm(mm)

/* flush_cache_range/flush_cache_page must be macros to avoid
   a dependency on linux/mm.h, which includes this file... */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}
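
/*
 * Illustrative sketch, not part of the original header: generic mm code
 * calls these hooks before a mapping changes under the current process,
 * e.g. an mremap-style move (old_start/old_end are hypothetical):
 *
 *	flush_cache_range(vma, old_start, old_end);
 *	// ... then move or tear down the page tables ...
 */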

/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
static inline void __flush_page_to_ram(void *vaddr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long addr, start, end;
		addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
		start = addr & ICACHE_SET_MASK;
		end = (addr + PAGE_SIZE - 1) & ICACHE_SET_MASK;
		if (start > end) {
			/* Set range wraps around the top of the cache:
			   flush the wrapped low part first, then
			   start..ICACHE_MAX_ADDR below. */
			flush_cf_bcache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_bcache(start, end);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__("nop\n\t"
				     ".chip 68040\n\t"
				     "cpushp %%bc,(%0)\n\t"
				     ".chip 68k"
				     : : "a" (__pa(vaddr)));
	} else {
		unsigned long _tmp;
		__asm__ __volatile__("movec %%cacr,%0\n\t"
				     "orw %1,%0\n\t"
				     "movec %0,%%cacr"
				     : "=&d" (_tmp)
				     : "di" (FLUSH_I));
	}
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
#define flush_dcache_page(page)		__flush_page_to_ram(page_address(page))
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))
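
/*
 * Illustrative sketch, not part of the original header: flush_dcache_page()
 * is what the kernel calls after writing through the kernel mapping of a
 * page that userspace may also have mapped (data/len are hypothetical):
 *
 *	memcpy(page_address(page), data, len);
 *	flush_dcache_page(page);	// push the aliased kernel-side writes
 */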

extern void flush_icache_user_page(struct vm_area_struct *vma,
				   struct page *page, unsigned long addr,
				   int len);
extern void flush_icache_range(unsigned long address, unsigned long endaddr);
extern void flush_icache_user_range(unsigned long address,
				    unsigned long endaddr);

static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_page(vma, page, vaddr, len);
}
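
/*
 * Illustrative sketch, not part of the original header: this is the helper
 * access_process_vm() uses when e.g. ptrace pokes instructions into another
 * task; flushing before the copy and the icache flush after keep both
 * caches coherent (maddr/offset are hypothetical):
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *	set_page_dirty_lock(page);
 */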

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}

#endif /* _M68K_CACHEFLUSH_H */