#ifndef __ASM_SH_CACHEFLUSH_H
#define __ASM_SH_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
/*
 * Cache flushing:
 *
 *  - flush_cache_all() flushes entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *
 *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
 *  - flush_icache_range(start, end) flushes(invalidates) a range for icache
 *  - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
 *  - flush_cache_sigtramp(vaddr) flushes the signal trampoline
 */
/*
 * Per-CPU ("local") cache maintenance hooks.  These are function pointers
 * so that the implementation matching the detected CPU can be installed at
 * runtime.  Every hook takes a single opaque argument pointer so that all
 * of them share one signature (NOTE(review): presumably a packed
 * vma/address argument block — confirm against the callers/setup code).
 */
extern void (*local_flush_cache_all)(void *args);
extern void (*local_flush_cache_mm)(void *args);
extern void (*local_flush_cache_dup_mm)(void *args);
extern void (*local_flush_cache_page)(void *args);
extern void (*local_flush_cache_range)(void *args);
extern void (*local_flush_dcache_page)(void *args);
extern void (*local_flush_icache_range)(void *args);
extern void (*local_flush_icache_page)(void *args);
extern void (*local_flush_cache_sigtramp)(void *args);
/*
 * Do-nothing implementation matching the local_flush_* hook signature,
 * for CPUs/operations that require no cache maintenance.
 */
static inline void cache_noop(void *args)
{
	/* Intentionally empty. */
}
/*
 * Low-level region flush primitives, also installed per CPU type at
 * runtime.  NOTE(review): the names suggest wback = write back dirty
 * lines, purge = write back + invalidate, invalidate = discard without
 * write-back — confirm against the per-CPU implementations.
 */
extern void (*__flush_wback_region)(void *start, int size);
extern void (*__flush_purge_region)(void *start, int size);
extern void (*__flush_invalidate_region)(void *start, int size);
/*
 * Architecture-wide cache maintenance entry points exported by the SH
 * cache implementation.  The flush_icache_page() declaration below was
 * truncated in the source and is restored to its canonical two-argument
 * form (vma + page).
 */
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_dup_mm(struct mm_struct *mm);
extern void flush_cache_page(struct vm_area_struct *vma,
			     unsigned long addr, unsigned long pfn);
extern void flush_cache_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end);
extern void flush_dcache_page(struct page *page);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma,
			      struct page *page);
extern void flush_cache_sigtramp(unsigned long address);
/*
 * Argument block for the single-void*-argument local_flush_* hooks above.
 * NOTE(review): addr1/addr2 presumably carry a start/end or address/pfn
 * pair depending on the operation — confirm against the callers.
 *
 * The struct wrapper was missing in the mangled source, which left these
 * members as bogus file-scope variable definitions; restored here.
 */
struct flusher_data {
	struct vm_area_struct *vma;
	unsigned long addr1, addr2;
};
/* Advertise to generic code that this arch implements flush_anon_page(). */
#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *page, unsigned long);
59 static inline void flush_anon_page(struct vm_area_struct
*vma
,
60 struct page
*page
, unsigned long vmaddr
)
62 if (boot_cpu_data
.dcache
.n_aliases
&& PageAnon(page
))
63 __flush_anon_page(page
, vmaddr
);
/* Advertise that this arch implements flush_kernel_dcache_page(). */
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
/*
 * Kernel-side dcache flush of a page: on SH this is the same operation as
 * the generic flush_dcache_page().  (Body braces restored; they were
 * missing in the mangled source.)
 */
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_dcache_page(page);
}
/*
 * Copy data to/from a user page while keeping caches coherent (used e.g.
 * by ptrace access paths).  Both declarations were truncated in the
 * mangled source; the trailing 'unsigned long len' parameter is restored
 * to the canonical kernel signature.
 */
extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);
/* vmap/vunmap ranges are handled conservatively with a full cache flush. */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

/* No locking is needed around dcache flushing of an address_space here. */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
/* One-time setup for the coherent kmap machinery. */
void kmap_coherent_init(void);
/*
 * Temporarily map 'page' for kernel access; release with kunmap_coherent().
 * NOTE(review): presumably the mapping is placed so it does not alias the
 * user mapping at 'addr' — confirm against the implementation.
 */
void *kmap_coherent(struct page *page, unsigned long addr);
void kunmap_coherent(void *kvaddr);

/*
 * Arch-private page flag; the name implies it marks pages whose dcache
 * lines have not yet been written back — NOTE(review): confirm in users.
 */
#define PG_dcache_dirty	PG_arch_1

/* Boot-time initialization entry point for the cache subsystem. */
void cpu_cache_init(void);
94 #endif /* __KERNEL__ */
95 #endif /* __ASM_SH_CACHEFLUSH_H */