/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>
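
/*
 * Write back and invalidate the D-cache lines covering [start, end).
 * flushda only acts on a line whose tag matches the supplied address,
 * so only data belonging to this address range is evicted.
 */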
static void __flush_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   flushda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}
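
/*
 * Write back and invalidate D-cache lines by index. flushd evicts the
 * line selected by the address regardless of its tag, so clamping the
 * range to dcache_size is enough to sweep the entire cache.
 */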
static void __flush_dcache_all(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   flushd 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}
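
/*
 * Discard D-cache lines covering [start, end) without writing dirty
 * data back; initda only invalidates lines whose tag matches the
 * address.
 */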
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   initda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}
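
/*
 * Invalidate I-cache lines for [start, end), clamped to icache_size,
 * then issue flushp so that no stale instructions remain in the
 * pipeline.
 */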
static void __flush_icache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.icache_line_size - 1);
	end += (cpuinfo.icache_line_size - 1);
	end &= ~(cpuinfo.icache_line_size - 1);

	if (end > start + cpuinfo.icache_size)
		end = start + cpuinfo.icache_size;

	for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
		__asm__ __volatile__ ("   flushi %0\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
	__asm__ __volatile__ (" flushp\n");
}
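
/*
 * Flush every user-space alias of @page in the current mm: walk the
 * mapping's i_mmap interval tree and flush the matching page of each
 * shared VMA, so all virtual aliases see coherent data.
 */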
static void flush_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;

		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset,
			page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

void flush_cache_all(void)
{
	__flush_dcache_all(0, cpuinfo.dcache_size);
	__flush_icache(0, cpuinfo.icache_size);
}

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	__invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_dcache(start, end);
	if (vma == NULL || (vma->vm_flags & VM_EXEC))
		__flush_icache(start, end);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			unsigned long pfn)
{
	unsigned long start = vmaddr;
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache(start, end);
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page. This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	unsigned long start = (unsigned long)page_address(page);

	__flush_dcache_all(start, start + PAGE_SIZE);
}
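
/*
 * If the page currently has no user-space mappings there is nothing to
 * alias, so the flush is deferred by clearing PG_dcache_clean and
 * update_mmu_cache() performs it when the page is actually mapped.
 * Otherwise flush the kernel mapping now, fix up any user aliases, and
 * mark the page clean.
 */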
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	/* Flush this page if there are aliases. */
	if (mapping && !mapping_mapped(mapping)) {
		clear_bit(PG_dcache_clean, &page->flags);
	} else {
		__flush_dcache_page(mapping, page);
		if (mapping) {
			unsigned long start = (unsigned long)page_address(page);

			flush_aliases(mapping, page);
			flush_icache_range(start, start + PAGE_SIZE);
		}
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
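
/*
 * Called after a PTE for @address has been set up. Perform any D-cache
 * flush that flush_dcache_page() deferred (PG_dcache_clean still
 * clear), then fix up user aliases and, for executable mappings, the
 * I-cache.
 */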
void update_mmu_cache(struct vm_area_struct *vma,
			unsigned long address, pte_t *pte)
{
	unsigned long pfn = pte_pfn(*pte);
	struct page *page;
	struct address_space *mapping;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (mapping) {
		flush_aliases(mapping, page);
		if (vma->vm_flags & VM_EXEC)
			flush_icache_page(vma, page);
	}
}
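
/*
 * Flush the user-space address range before the copy and the kernel
 * destination after it, so that no mapping of the new page holds stale
 * data.
 */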
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
			struct page *to)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	copy_page(vto, vfrom);
	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
	__flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	clear_page(addr);
	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
	__flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long user_vaddr,
			void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache_all((unsigned long)src, (unsigned long)src + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)src, (unsigned long)src + len);
}
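
/*
 * Write @len bytes into a user page through its kernel mapping (e.g.
 * for ptrace) and flush @dst afterwards so the user mapping sees the
 * new data, including the I-cache for executable VMAs.
 */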
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long user_vaddr,
			void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache_all((unsigned long)dst, (unsigned long)dst + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
}