[linux/fpc-iii.git] arch/mips/mm/cache.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
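
/*
 * The function pointers below form the MIPS cache-flush interface.
 * They are installed at boot by the CPU-family-specific cache code
 * (r3k, r4k, tx39, ...) selected in cpu_cache_init() at the bottom
 * of this file.
 */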
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
        unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
        unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);

EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);
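
/*
 * Writeback/invalidate hooks used by the noncoherent DMA mapping code:
 * without hardware cache coherency, cache lines must be written back
 * and/or invalidated around each DMA transfer.
 */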
#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
        unsigned int, cache)
{
        if (bytes == 0)
                return 0;
        if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
                return -EFAULT;

        flush_icache_range(addr, addr + bytes);

        return 0;
}
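
/*
 * Illustrative userspace usage (not part of this file): a JIT that has
 * just written instructions would call
 *
 *      #include <sys/cachectl.h>
 *      cacheflush(code_buf, code_len, ICACHE);
 *
 * so the newly written instructions become visible to the instruction
 * cache.
 */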

void __flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long addr;

        if (PageHighMem(page))
                return;
        if (mapping && !mapping_mapped(mapping)) {
                SetPageDcacheDirty(page);
                return;
        }

        /*
         * We could delay the flush for the !page_mapping case too. But that
         * case is for exec env/arg pages and those are 99% certainly going to
         * get faulted into the tlb (and thus flushed) anyways.
         */
        addr = (unsigned long) page_address(page);
        flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);
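
/*
 * Anonymous pages may be mapped at a user address that aliases the
 * page's kernel address in a virtually indexed cache. Flush the view
 * that may hold live data so the two aliases do not diverge.
 */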
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (page_mapped(page) && !Page_dcache_dirty(page)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        flush_data_cache_page((unsigned long)kaddr);
                        kunmap_coherent();
                } else
                        flush_data_cache_page(addr);
        }
}

EXPORT_SYMBOL(__flush_anon_page);
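
/*
 * Called on the fault path (via update_mmu_cache()) once a PTE has
 * been installed: if a deferred dcache flush is pending for this page
 * and the new mapping is executable or aliases the kernel address,
 * perform the flush now and clear the dirty marker.
 */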
void __update_cache(struct vm_area_struct *vma, unsigned long address,
        pte_t pte)
{
        struct page *page;
        unsigned long pfn, addr;
        int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

        pfn = pte_pfn(pte);
        if (unlikely(!pfn_valid(pfn)))
                return;
        page = pfn_to_page(pfn);
        if (page_mapping(page) && Page_dcache_dirty(page)) {
                addr = (unsigned long) page_address(page);
                if (exec || pages_do_alias(addr, address & PAGE_MASK))
                        flush_data_cache_page(addr);
                ClearPageDcacheDirty(page);
        }
}
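
/*
 * Default cache coherency attribute ORed into every page protection;
 * set up by the CPU-specific cache code to match what the hardware
 * actually supports.
 */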
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
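
/*
 * protection_map[] is indexed by a vma's VM_READ|VM_WRITE|VM_EXEC|
 * VM_SHARED bits: entries 0-7 are the private (copy-on-write)
 * combinations, 8-15 the shared ones. CPUs with RIXI express read and
 * execute permission with explicit no-read/no-exec bits rather than
 * the legacy PAGE_* protections.
 */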
static inline void setup_protection_map(void)
{
        if (cpu_has_rixi) {
                protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
                protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
                protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);

                protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
                protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
                protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
                protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
                protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
        } else {
                protection_map[0] = PAGE_NONE;
                protection_map[1] = PAGE_READONLY;
                protection_map[2] = PAGE_COPY;
                protection_map[3] = PAGE_COPY;
                protection_map[4] = PAGE_READONLY;
                protection_map[5] = PAGE_READONLY;
                protection_map[6] = PAGE_COPY;
                protection_map[7] = PAGE_COPY;
                protection_map[8] = PAGE_NONE;
                protection_map[9] = PAGE_READONLY;
                protection_map[10] = PAGE_SHARED;
                protection_map[11] = PAGE_SHARED;
                protection_map[12] = PAGE_READONLY;
                protection_map[13] = PAGE_READONLY;
                protection_map[14] = PAGE_SHARED;
                protection_map[15] = PAGE_SHARED;
        }
}

void cpu_cache_init(void)
{
        if (cpu_has_3k_cache) {
                extern void __weak r3k_cache_init(void);

                r3k_cache_init();
        }
        if (cpu_has_6k_cache) {
                extern void __weak r6k_cache_init(void);

                r6k_cache_init();
        }
        if (cpu_has_4k_cache) {
                extern void __weak r4k_cache_init(void);

                r4k_cache_init();
        }
        if (cpu_has_8k_cache) {
                extern void __weak r8k_cache_init(void);

                r8k_cache_init();
        }
        if (cpu_has_tx39_cache) {
                extern void __weak tx39_cache_init(void);

                tx39_cache_init();
        }

        if (cpu_has_octeon_cache) {
                extern void __weak octeon_cache_init(void);

                octeon_cache_init();
        }

        setup_protection_map();
}
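
/*
 * Weak default policy for mapping physical addresses (e.g. /dev/mem):
 * a file opened with O_DSYNC requests uncached access, and anything
 * above high_memory is treated as device memory and mapped uncached.
 * Platforms with other requirements override this.
 */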
int __weak __uncached_access(struct file *file, unsigned long addr)
{
        if (file->f_flags & O_DSYNC)
                return 1;

        return addr >= __pa(high_memory);
}