/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CACHEFLUSH_H
#define __ASM_SH_CACHEFLUSH_H

#include <linux/mm.h>

/*
 * Cache flushing:
 *
 *  - flush_cache_all() flushes entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *
 *  - flush_dcache_folio(folio) flushes(wback&invalidates) a folio for dcache
 *  - flush_icache_range(start, end) flushes(invalidates) a range for icache
 *  - flush_icache_pages(vma, pg, nr) flushes(invalidates) pages for icache
 *  - flush_cache_sigtramp(vaddr) flushes the signal trampoline
 */
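
/*
 * Usage sketch (illustrative only, not part of the original header): after
 * copying instructions into an executable buffer, the dcache lines must be
 * written back and the icache lines invalidated before the new code runs.
 * The names code_buf, new_insns and len below are hypothetical.
 *
 *	memcpy(code_buf, new_insns, len);
 *	flush_icache_range((unsigned long)code_buf,
 *			   (unsigned long)code_buf + len);
 */
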
extern void (*local_flush_cache_all)(void *args);
extern void (*local_flush_cache_mm)(void *args);
extern void (*local_flush_cache_dup_mm)(void *args);
extern void (*local_flush_cache_page)(void *args);
extern void (*local_flush_cache_range)(void *args);
extern void (*local_flush_dcache_folio)(void *args);
extern void (*local_flush_icache_range)(void *args);
extern void (*local_flush_icache_folio)(void *args);
extern void (*local_flush_cache_sigtramp)(void *args);

static inline void cache_noop(void *args) { }

extern void (*__flush_wback_region)(void *start, int size);
extern void (*__flush_purge_region)(void *start, int size);
extern void (*__flush_invalidate_region)(void *start, int size);

extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_dup_mm(struct mm_struct *mm);
extern void flush_cache_page(struct vm_area_struct *vma,
			     unsigned long addr, unsigned long pfn);
extern void flush_cache_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio
static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}

extern void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_user_range flush_icache_range
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
		unsigned int nr);
#define flush_icache_pages flush_icache_pages
extern void flush_cache_sigtramp(unsigned long address);

struct flusher_data {
	struct vm_area_struct *vma;
	unsigned long addr1, addr2;
};

#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *page, unsigned long);

static inline void flush_anon_page(struct vm_area_struct *vma,
				   struct page *page, unsigned long vmaddr)
{
	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	__flush_wback_region(addr, size);
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	__flush_invalidate_region(addr, size);
}

extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

#define flush_cache_vmap(start, end)		local_flush_cache_all(NULL)
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		local_flush_cache_all(NULL)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

void kmap_coherent_init(void);
void *kmap_coherent(struct page *page, unsigned long addr);
void kunmap_coherent(void *kvaddr);

#define PG_dcache_clean	PG_arch_1

void cpu_cache_init(void);

void __weak l2_cache_init(void);

void __weak j2_cache_init(void);
void __weak sh2_cache_init(void);
void __weak sh2a_cache_init(void);
void __weak sh3_cache_init(void);
void __weak shx3_cache_init(void);
void __weak sh4_cache_init(void);
void __weak sh7705_cache_init(void);

void __weak sh4__flush_region_init(void);

static inline void *sh_cacheop_vaddr(void *vaddr)
{
	if (__in_29bit_mode())
		vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
	return vaddr;
}

#endif /* __ASM_SH_CACHEFLUSH_H */