/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */

#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

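/*
 * The function pointers below make up the machine-independent cache API.
 * They start out unset and are installed at boot by the CPU-specific
 * *_cache_init() routines invoked from cpu_cache_init() below.
 */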
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

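/*
 * On platforms whose DMA is not cache-coherent these hooks supply the
 * writeback and/or invalidation that the streaming DMA mapping code
 * needs around device transfers.
 */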
/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT */

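/*
 * This backs the MIPS cacheflush(2) syscall, which user space (a JIT,
 * for instance) issues after writing instructions to memory.  A sketch
 * of a typical call, assuming the libc wrapper and the BCACHE constant
 * from <asm/cachectl.h>:
 *
 *	cacheflush(code_buf, code_len, BCACHE);
 */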
/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
asmlinkage int sys_cacheflush(unsigned long addr,
	unsigned long bytes, unsigned int cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}

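/*
 * Flush a page out of the data cache, or mark it dirty and defer the
 * flush: highmem pages have no permanent kernel mapping to flush
 * through, and page-cache pages with no user mappings can safely wait
 * until __update_cache() sees them being mapped in.
 */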
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);

		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the tlb (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

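/*
 * Flush an anonymous page whose user-space address may alias its kernel
 * address in a virtually indexed cache.  While the page is mapped and
 * not marked dcache-dirty, flush through a kmap_coherent() mapping of
 * the same cache colour as the user address; otherwise the kernel
 * address itself holds the dirty lines.
 */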
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

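/*
 * Called via update_mmu_cache() when a PTE is installed: if the backing
 * page still has dirty data-cache lines, write them back before user
 * space gets to see the new mapping.  Executable mappings are flushed
 * whenever the page is dcache-dirty, unless the CPU fills its I-cache
 * from the D-cache.
 */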
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}

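/*
 * Default cache coherency attribute used when building page protection
 * bits; the CPU-specific cache setup code chooses the value.
 */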
unsigned long _page_cachable_default;
EXPORT_SYMBOL_GPL(_page_cachable_default);

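/*
 * protection_map[] is indexed by the low four vm_flags bits:
 * bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC, bit 3 = VM_SHARED.
 * Private writable entries get PAGE_COPY so writes fault and trigger
 * copy-on-write; shared writable entries map straight to PAGE_SHARED.
 */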
static inline void setup_protection_map(void)
{
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
}

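/*
 * Pick the one cache flavour this CPU actually has and run its init
 * routine.  The extern declarations are __weak so the kernel links even
 * when only one of the cache implementations is built in; the branches
 * for absent flavours are never taken (and with the cpu_has_* macros
 * usually compile-time constants, they are optimized away entirely).
 */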
void __devinit cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	setup_protection_map();
}

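/*
 * Default policy for deciding whether a physical-address mapping (e.g.
 * of /dev/mem) must be uncached: honour O_SYNC, and treat anything
 * beyond high_memory as device memory.  Platforms may override this
 * __weak default.
 */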
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_SYNC)
		return 1;

	return addr >= __pa(high_memory);
}