include/asm-sh64/cacheflush.h

#ifndef __ASM_SH64_CACHEFLUSH_H
#define __ASM_SH64_CACHEFLUSH_H

#ifndef __ASSEMBLY__

#include <asm/page.h>

struct vm_area_struct;
struct page;
struct mm_struct;
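
/*
 * Cache flushing primitives, implemented by the sh64 cache code.
 * The flush_cache_* routines write back/invalidate user mappings
 * before the kernel operates on them; the flush_icache_* routines
 * make newly written instructions visible to instruction fetch.
 */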
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_sigtramp(unsigned long start, unsigned long end);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long pfn);
extern void flush_dcache_page(struct page *pg);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);

#define flush_cache_dup_mm(mm)	flush_cache_mm(mm)
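
/*
 * Nothing needs to be done around page-cache mapping updates on sh64,
 * so the d-cache mmap lock hooks are no-ops.
 */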
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
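
/*
 * No ranged flush exists for vmalloc mappings, so setting one up or
 * tearing it down conservatively flushes the entire cache.
 */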
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define flush_icache_page(vma, page)	do { } while (0)
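
/*
 * Used when the kernel reads or writes a user page on the user's
 * behalf (e.g. ptrace).  A write must flush the user mapping before
 * the store and then make the i-cache coherent afterwards, since the
 * copied bytes may be instructions.
 */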
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
		flush_icache_user_range(vma, page, vaddr, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
	} while (0)

#endif /* __ASSEMBLY__ */

#endif /* __ASM_SH64_CACHEFLUSH_H */
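
For reference, here is a minimal sketch of how a caller is expected to use
copy_to_user_page(), modeled on access_process_vm() in mm/memory.c from
kernels of this vintage (the era-specific get_user_pages() signature,
mmap_sem, and page_cache_release() are assumed).  The helper name
patch_user_text() is hypothetical, and the sketch assumes the write does
not cross a page boundary.

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>

/* Hypothetical helper: write 'len' bytes of (possibly executable) data
 * into another task's address space at 'addr'.  Assumes addr + len does
 * not cross a page boundary. */
static int patch_user_text(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long addr, const void *buf, int len)
{
	struct vm_area_struct *vma;
	struct page *page;
	void *maddr;
	int ret;

	down_read(&mm->mmap_sem);
	/* Fault in and pin the target page, getting its vma as well. */
	ret = get_user_pages(tsk, mm, addr, 1, 1 /* write */, 1 /* force */,
			     &page, &vma);
	if (ret <= 0) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	maddr = kmap(page);
	/*
	 * copy_to_user_page() brackets the memcpy() with the cache
	 * maintenance declared in this header: flush_cache_page()
	 * before the store, flush_icache_user_range() after it, so the
	 * new bytes are visible to subsequent instruction fetches.
	 */
	copy_to_user_page(vma, page, addr,
			  maddr + (addr & ~PAGE_MASK), buf, len);
	kunmap(page);
	set_page_dirty_lock(page);
	page_cache_release(page);
	up_read(&mm->mmap_sem);

	return len;
}

copy_from_user_page() is the mirror operation for reads from the traced
process; it performs only the flush_cache_page() before the copy, since
no new instructions become visible to the target.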