/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
//#define printd(x...) printk(x)
#define printd(x...) do { } while (0)
/*
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency:
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate if the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing:
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
 * page.
 */
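/*
 * Illustration (a sketch with example numbers, not a statement about any
 * particular core): with a 16 KiB direct-mapped cache way and 4 KiB pages,
 * the way index uses two address bits above PAGE_SHIFT, so two virtual
 * mappings of the same physical page share cache lines only if those bits
 * agree. Conceptually, that is the test the alias macros perform:
 *
 *	DCACHE_ALIAS_EQ(a, b) == (((a ^ b) & DCACHE_ALIAS_MASK) == 0)
 */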
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		unsigned long kvaddr;

		if (!PageHighMem(page)) {
			kvaddr = (unsigned long)page_to_virt(page);

			__invalidate_dcache_page(kvaddr);
		} else {
			kvaddr = TLBTEMP_BASE_1 +
				(page_to_phys(page) & DCACHE_ALIAS_MASK);

			__invalidate_dcache_page_alias(kvaddr,
						       page_to_phys(page));
		}
	}
}
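/*
 * TLBTEMP_BASE_1 (and TLBTEMP_BASE_2, used by copy_user_highpage() below)
 * are fixed kernel virtual windows wide enough to hold one page at every
 * cache color. Adding the address bits masked by DCACHE_ALIAS_MASK picks
 * the slot whose color matches the mapping we need to operate on:
 *
 *	virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
 *
 * Two windows exist so that a source and a destination page can be mapped
 * at their correct colors at the same time.
 */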
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		*paddr = page_to_phys(page);
		return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
	} else {
		*paddr = 0;
		return page_to_virt(page);
	}
}
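/*
 * Reading of the contract above: when the kernel's linear mapping of the
 * page already has the right color (and the page is not highmem), *paddr
 * is left zero and the returned pointer is usable directly; otherwise
 * *paddr carries the physical address so that clear_page_alias() and
 * copy_page_alias() can install a temporary mapping at the returned
 * colored address first.
 */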
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long paddr;
	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

	preempt_disable();
	kmap_invalidate_coherent(page, vaddr);
	set_bit(PG_arch_1, &page->flags);
	clear_page_alias(kvaddr, paddr);
	preempt_enable();
}
EXPORT_SYMBOL(clear_user_highpage);
void copy_user_highpage(struct page *dst, struct page *src,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long dst_paddr, src_paddr;
	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
					  &dst_paddr);
	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
					  &src_paddr);

	preempt_disable();
	kmap_invalidate_coherent(dst, vaddr);
	set_bit(PG_arch_1, &dst->flags);
	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_highpage);
/*
 * Any time the kernel writes to a user page cache page, or it is about to
 * read from a page cache page, this routine is called.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu_cache().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {
		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(virt, phys);

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}
EXPORT_SYMBOL(flush_dcache_page);
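/*
 * Life cycle of the deferred flush above: while a page-cache page has no
 * user-space mapping yet, flush_dcache_page() only sets PG_arch_1 (page
 * dirty in the cache). update_mmu_cache() below notices the bit when the
 * first user mapping is installed, writes the cache lines back at both
 * colors, and clears the bit again.
 */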
/*
 * For now, flush the whole cache. FIXME??
 */

void local_flush_cache_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}
EXPORT_SYMBOL(local_flush_cache_range);
/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
			    unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
}
EXPORT_SYMBOL(local_flush_cache_page);
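/*
 * One reading of the 'multi-hit' note above: the alias flush helpers set
 * up a temporary TLB entry for the page. Doing that at the user address
 * itself could leave two TLB entries translating the same virtual page,
 * which the hardware reports as a multi-hit exception; the dedicated
 * TLBTEMP window cannot collide with a live user mapping.
 */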
#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entry in TLBs */

	flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
		unsigned long phys = page_to_phys(page);
		unsigned long tmp;

		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		__invalidate_icache_page_alias(tmp, phys);

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		unsigned long paddr = (unsigned long)kmap_atomic(page);
		__flush_dcache_page(paddr);
		__invalidate_icache_page(paddr);
		set_bit(PG_arch_1, &page->flags);
		kunmap_atomic((void *)paddr);
	}
#endif
}
/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
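/*
 * copy_to_user_page() below is what lets a ptrace-style writer (via
 * access_process_vm()) modify another process's page through a kernel
 * mapping and still leave the target's view of the caches consistent:
 * flush the user color first, copy, then clean whichever mapping was
 * dirtied and resync the icache for executable VMAs.
 */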
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_range((unsigned long)dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long)dst, len);
		__invalidate_icache_range((unsigned long)dst, len);
	}
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	memcpy(dst, src, len);
}

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */