/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency:
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate whether the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing:
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
 * page.
 */
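
/*
 * The flush helpers below use temporary kernel mappings at
 * TLBTEMP_BASE_1/TLBTEMP_BASE_2: adding (addr & DCACHE_ALIAS_MASK) to
 * the base yields a virtual address with the same cache color as
 * 'addr', so cache operations performed through the temporary mapping
 * affect the same cache lines as the original mapping.
 */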
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		unsigned long kvaddr;

		if (!PageHighMem(page)) {
			kvaddr = (unsigned long)page_to_virt(page);

			__invalidate_dcache_page(kvaddr);
		} else {
			kvaddr = TLBTEMP_BASE_1 +
				(page_to_phys(page) & DCACHE_ALIAS_MASK);

			__invalidate_dcache_page_alias(kvaddr,
						       page_to_phys(page));
		}
	}
}
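
/*
 * Pick a kernel virtual address through which 'page' can be accessed
 * coherently with a user mapping at 'vaddr': the identity-mapped
 * address if the page is low memory and the colors match (*paddr is
 * then 0), otherwise a TLBTEMP alias with the color of 'vaddr', with
 * *paddr set to the physical address to be mapped there.
 */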
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		*paddr = page_to_phys(page);
		return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
	} else {
		*paddr = 0;
		return page_to_virt(page);
	}
}
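
/*
 * Clear a user page through a kernel address that is coherent with
 * the user mapping at 'vaddr'. PG_arch_1 is set so that
 * update_mmu_cache() treats the page as dirty and flushes the aliases
 * when the page is finally mapped to user space.
 */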
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long paddr;
	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

	preempt_disable();
	kmap_invalidate_coherent(page, vaddr);
	set_bit(PG_arch_1, &page->flags);
	clear_page_alias(kvaddr, paddr);
	preempt_enable();
}
EXPORT_SYMBOL(clear_user_highpage);
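
/*
 * Copy a user page using coherent kernel addresses for both source
 * and destination; the PG_arch_1 bookkeeping matches
 * clear_user_highpage() above.
 */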
void copy_user_highpage(struct page *dst, struct page *src,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long dst_paddr, src_paddr;
	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
					  &dst_paddr);
	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
					  &src_paddr);

	preempt_disable();
	kmap_invalidate_coherent(dst, vaddr);
	set_bit(PG_arch_1, &dst->flags);
	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * Any time the kernel writes to a user page cache page, or it is about to
 * read from a page cache page, this routine is called.
 */

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {
		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(virt, phys);

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * For now, flush the whole cache. FIXME??
 */
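/*
 * A range flush may span pages of many different cache colors, so
 * flushing the whole D- and I-cache is the simple, conservative
 * implementation; the FIXME above suggests a finer-grained version
 * was considered.
 */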
void local_flush_cache_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}
EXPORT_SYMBOL(local_flush_cache_range);

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
			    unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit. */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
}
EXPORT_SYMBOL(local_flush_cache_page);

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
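
/*
 * Called after a PTE has been installed: invalidate the old TLB entry
 * and make the page's cache state coherent. With an aliasing D-cache,
 * pages marked dirty via PG_arch_1 are flushed through alias mappings;
 * without aliasing, the I-cache is synchronized for executable pages.
 */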
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entry in TLBs */

	flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
		unsigned long phys = page_to_phys(page);
		unsigned long tmp;

		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		__invalidate_icache_page_alias(tmp, phys);

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		unsigned long paddr = (unsigned long)kmap_atomic(page);
		__flush_dcache_page(paddr);
		__invalidate_icache_page(paddr);
		set_bit(PG_arch_1, &page->flags);
		kunmap_atomic((void *)paddr);
	}
#endif
}

/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
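
/*
 * Write into a user page (e.g. on behalf of access_process_vm()):
 * invalidate the user-space alias before the copy so the new data is
 * not shadowed by stale cache lines, then clean the kernel alias and,
 * for executable mappings, the I-cache afterwards.
 */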
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	/* Copy data. */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_range((unsigned long)dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long)dst, len);
		__invalidate_icache_range((unsigned long)dst, len);
	}
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient.)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	memcpy(dst, src, len);
}

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */