/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>
/*
 * The cache doesn't need to be flushed when TLB entries change,
 * because the cache is mapped to physical memory, not virtual memory.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
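
/*
 * PG_arch_1 is reused as a per-folio "D-cache clean" flag, following
 * the pattern common to several architectures: flush_dcache_folio()
 * below only clears the bit to record that the folio's contents may
 * have changed, and the actual cache maintenance is expected to be
 * deferred until the folio is next mapped into user space (e.g. from
 * update_mmu_cache(), which can test-and-set the bit).
 */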
#define PG_dcache_clean		PG_arch_1
static inline void flush_dcache_folio(struct folio *folio)
{
	if (test_bit(PG_dcache_clean, &folio->flags))
		clear_bit(PG_dcache_clean, &folio->flags);
}
#define flush_dcache_folio flush_dcache_folio
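
/*
 * ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE tells the generic code that this
 * architecture provides its own flush_dcache_page(); the page-based
 * entry point below simply forwards to the folio variant above.
 */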
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
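
/*
 * flush_icache_range() must make newly written instructions visible to
 * the fetch unit; cache_wbinv_range() is assumed to write the D-cache
 * back and invalidate the corresponding cache lines over [start, end).
 */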
#define flush_icache_range(start, end)		cache_wbinv_range(start, end)
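
/*
 * SMP I-cache maintenance: flush_icache_mm_range() appears to sync the
 * I-cache only on CPUs currently running this mm, while marking the
 * others so that flush_icache_deferred() can perform the invalidation
 * lazily the next time the mm is switched in (a scheme also used by
 * RISC-V). Both are defined in the arch's mm code.
 */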
void flush_icache_mm_range(struct mm_struct *mm,
			   unsigned long start, unsigned long end);
void flush_icache_deferred(struct mm_struct *mm);
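
/*
 * No maintenance is needed when vmalloc-area mappings are created or
 * torn down, for the same reason the flush_cache_* hooks above are
 * no-ops: the cache is mapped to physical memory.
 */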
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)
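
/*
 * copy_to_user_page() is used when the kernel writes into a user page
 * through its kernel mapping (e.g. ptrace or uprobes). If the VMA is
 * executable, the D-cache is written back and the I-cache is synced so
 * the target task cannot execute stale instructions.
 */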
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	if (vma->vm_flags & VM_EXEC) { \
		dcache_wb_range((unsigned long)dst, \
				(unsigned long)dst + len); \
		flush_icache_mm_range(current->mm, \
				(unsigned long)dst, \
				(unsigned long)dst + len); \
	} \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __ABI_CSKY_CACHEFLUSH_H */