/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/barrier.h>	/* dsb() and isb(), used by the I-cache helpers below */

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
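
/*
 * Illustrative sketch of how this flag is used (loosely modelled on the
 * pattern in arch/arm64/mm/flush.c, not a copy of it): maintenance is
 * performed once, the first time the bit is found clear, and skipped on
 * subsequent faults while it remains set:
 *
 *	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 *		__flush_dcache_area(page_address(page), PAGE_SIZE);
 */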

/*
 *	MM Cache Management
 *	===================
 *
 *	arch/arm64/mm/cache.S implements these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive; start
 *	addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information. Please note that
 *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
 *	VIPT or ASID-tagged VIVT I-cache.
 *
 *	flush_cache_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_icache_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_cache_user_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the given region is written back.
 *		- kaddr  - start address of the region
 *		- size   - region size
 */
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
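
/*
 * Example (illustrative only; "addr" and "new_insn" are hypothetical):
 * after storing new instructions through a kernel mapping, e.g. when
 * loading a module or patching code, the modified range must be made
 * coherent before any CPU executes it:
 *
 *	*(u32 *)addr = new_insn;
 *	flush_icache_range((unsigned long)addr,
 *			   (unsigned long)addr + sizeof(u32));
 */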

/*
 * The arm64 D-cache behaves as non-aliasing, so no maintenance is needed
 * on a change of page tables: these hooks are empty.
 */
static inline void flush_cache_mm(struct mm_struct *mm)
{
}

static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long user_addr, unsigned long pfn)
{
}

/*
 * Cache maintenance functions used by the DMA API. Not to be used directly.
 */
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_range(const void *, const void *);
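
/*
 * Sketch of the intended call path (illustrative; the function name is
 * hypothetical, not this kernel's actual dma-mapping code): the arch DMA
 * ops perform the CPU-side maintenance through these helpers, passing the
 * enum dma_data_direction value as the int argument:
 *
 *	static void example_sync_for_device(const void *cpu_addr,
 *					    size_t size, int dir)
 *	{
 *		__dma_map_area(cpu_addr, size, dir);
 *	}
 */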

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space. Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
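
/*
 * Example (illustrative only): this mirrors the pattern used by generic
 * remote-access code such as __access_remote_vm() when ptrace writes
 * into another task's page; copy_to_user_page() also keeps the I-cache
 * coherent in case the target page contains code:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	kunmap(page);
 */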

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space. This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
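
/*
 * Example (illustrative only; "data" and "len" are hypothetical): code
 * that fills a page cache page through its kernel mapping should call
 * flush_dcache_page() before the page can be mapped into user space:
 *
 *	void *kaddr = kmap_atomic(page);
 *	memcpy(kaddr, data, len);
 *	kunmap_atomic(kaddr);
 *	flush_dcache_page(page);
 */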

static inline void __local_flush_icache_all(void)
{
	asm("ic iallu");	/* invalidate this CPU's entire I-cache to PoU */
	dsb(nsh);		/* complete the invalidation ... */
	isb();			/* ... and resynchronise the instruction stream */
}

static inline void __flush_icache_all(void)
{
	asm("ic ialluis");	/* invalidate the I-cache across the Inner Shareable domain */
	dsb(ish);
}
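
/*
 * Usage note (illustrative, not from this file): the local variant is
 * sufficient when only the executing CPU may have fetched stale
 * instructions, e.g. while a CPU is being brought online; once other
 * CPUs can observe the modified code, the broadcast variant is needed:
 *
 *	__local_flush_icache_all();	this CPU only (IALLU, dsb nsh)
 *	__flush_icache_all();		all CPUs in the Inner Shareable
 *					domain (IALLUIS, dsb ish)
 */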

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

/*
 * We don't appear to need to do anything here. In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma, page)	do { } while (0)

/*
 * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}

int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
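
/*
 * Example (illustrative only; "text" and "npages" are hypothetical):
 * temporarily permit writes to a read-only, page-aligned code mapping,
 * then restore protections and make the new instructions visible:
 *
 *	set_memory_rw((unsigned long)text, npages);
 *	... patch the code ...
 *	set_memory_ro((unsigned long)text, npages);
 *	flush_icache_range((unsigned long)text,
 *			   (unsigned long)text + npages * PAGE_SIZE);
 */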

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#endif	/* __ASM_CACHEFLUSH_H */