/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/kgdb.h>
#include <linux/mm.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm64/mm/cache.S implements these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive; start
 *	addresses should be rounded down, end addresses up.
 *
 *	See Documentation/core-api/cachetlb.rst for more information. Please note
 *	that the implementation assumes non-aliasing VIPT D-cache and (aliasing)
 *	VIPT I-cache.
 *
 *	flush_cache_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_icache_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	invalidate_icache_range(start, end)
 *
 *		Invalidate the I-cache in the region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_cache_user_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 */
extern void __flush_icache_range(unsigned long start, unsigned long end);
extern int invalidate_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __inval_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_poc(void *addr, size_t len);
extern void __clean_dcache_area_pop(void *addr, size_t len);
extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
extern void sync_icache_aliases(void *kaddr, unsigned long len);

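/*
 * Illustrative sketch, not part of the mainline header: a caller that has
 * written a buffer which a non-cache-coherent observer will read cleans
 * (and invalidates) the range with __flush_dcache_area(), as documented
 * above. The helper name is hypothetical.
 */
static inline void example_publish_buffer(void *buf, size_t size)
{
	/* Write dirty lines back to memory for this kernel-VA range. */
	__flush_dcache_area(buf, size);
}
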
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_icache_range(start, end);

	/*
	 * IPI all online CPUs so that they undergo a context synchronization
	 * event and are forced to refetch the new instructions.
	 */

	/*
	 * KGDB performs cache maintenance with interrupts disabled, so we
	 * will deadlock trying to IPI the secondary CPUs. In theory, we can
	 * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
	 * just means that KGDB will elide the maintenance altogether! As it
	 * turns out, KGDB uses IPIs to round up the secondary CPUs during
	 * the patching operation, so we don't need extra IPIs here anyway.
	 * In which case, add a KGDB-specific bodge and return early.
	 */
	if (kgdb_connected && irqs_disabled())
		return;

	kick_all_cpus_sync();
}

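/*
 * Illustrative sketch, not part of the mainline header: after storing a
 * new instruction, flush_icache_range() cleans the D-cache to the point
 * of unification, invalidates the I-cache, and (as above) IPIs the other
 * CPUs so they re-fetch. The helper name is hypothetical.
 */
static inline void example_patch_insn(u32 *addr, u32 insn)
{
	*addr = insn;			/* store the new instruction */
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + sizeof(insn));
}
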
static inline void flush_cache_mm(struct mm_struct *mm)
{
}

static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long user_addr, unsigned long pfn)
{
}

static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
}

/*
 * Cache maintenance functions used by the DMA API. Not to be used directly.
 */
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_area(const void *, size_t);

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space. Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)

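/*
 * Illustrative sketch, not part of the mainline header; it follows the
 * __access_remote_vm() pattern from mm/memory.c and would also need
 * <linux/highmem.h>. Writes into another process's page go through
 * copy_to_user_page() so the target's I-cache stays coherent; plain
 * reads need no maintenance, which is why copy_from_user_page() above
 * is a bare memcpy(). The helper name is hypothetical.
 */
static inline void example_write_remote_page(struct vm_area_struct *vma,
					     struct page *page,
					     unsigned long vaddr,
					     const void *buf, unsigned long len)
{
	void *maddr = kmap(page);

	/* Copy via the kernel alias, then sync caches for the user alias. */
	copy_to_user_page(vma, page, vaddr,
			  maddr + offset_in_page(vaddr), buf, len);
	kunmap(page);
}
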
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space. This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

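/*
 * Illustrative sketch of the deferral described above, based on the arm64
 * implementation in arch/arm64/mm/flush.c (not part of this header):
 * flush_dcache_page() only records that the kernel wrote to the page by
 * clearing PG_dcache_clean; the real maintenance is deferred until the
 * page is mapped into user space.
 */
static inline void example_flush_dcache_page(struct page *page)
{
	/* Mark the kernel mapping dirty; defer the actual cache flushing. */
	if (test_bit(PG_dcache_clean, &page->flags))
		clear_bit(PG_dcache_clean, &page->flags);
}
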
static inline void __flush_icache_all(void)
{
	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
		return;
	asm("ic	ialluis");
	dsb(ish);
}

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

/*
 * We don't appear to need to do anything here. In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma, page)	do { } while (0)

/*
 * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}

int set_memory_valid(unsigned long addr, int numpages, int enable);

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);

#endif	/* __ASM_CACHEFLUSH_H */