/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgtable.h>
#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>

//#define printd(x...) printk(x)
#define printd(x...) do { } while (0)

/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate if the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we have to always flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
 * page.
 */

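/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * with DCACHE_WAY_SIZE > PAGE_SIZE, the D-cache index uses more address bits
 * than the page offset, so two mappings of the same physical page can land in
 * different cache lines depending on the virtual address bits covered by
 * DCACHE_ALIAS_MASK (the "color"). The hypothetical helper below just mirrors
 * the alias test the code performs with DCACHE_ALIAS_EQ(); it is kept out of
 * the build.
 */
#if 0	/* example only */
static inline int dcache_alias_conflict(unsigned long vaddr, unsigned long paddr)
{
	/* Non-zero when the two addresses select different cache colors. */
	return !DCACHE_ALIAS_EQ(vaddr, paddr);
}
#endif
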
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

/*
 * Any time the kernel writes to a user page cache page, or is about to
 * read from a page cache page, this routine is called.
 */

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/*
	 * If we have a mapping but the page is not mapped to user space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu_cache().
	 */
	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {

		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */
		if (!alias && !mapping)
			return;

		__flush_invalidate_dcache_page((long)page_address(page));

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}

/*
 * For now, flush the whole cache. FIXME??
 */
void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit. */
	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
}

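/*
 * Illustrative sketch (added by the editor, not in the original file): the
 * alias helpers above are always handed a temporary kernel virtual address of
 * the form TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK). Preserving the
 * color bits of the user address makes the temporary mapping index the same
 * cache lines as the user mapping, so flushing through it reaches the user's
 * aliased lines. The helper name below is hypothetical; the block is kept out
 * of the build.
 */
#if 0	/* example only */
static inline unsigned long tlbtemp_alias_address(unsigned long user_vaddr)
{
	return TLBTEMP_BASE_1 + (user_vaddr & DCACHE_ALIAS_MASK);
}
#endif
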
#endif	/* (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK */

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entry in TLBs */
	invalidate_itlb_mapping(addr);
	invalidate_dtlb_mapping(addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {

		unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		unsigned long paddr = (unsigned long)page_address(page);
		unsigned long phys = page_to_phys(page);

		__flush_invalidate_dcache_page(paddr);

		__flush_invalidate_dcache_page_alias(vaddr, phys);
		__invalidate_icache_page_alias(vaddr, phys);

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		unsigned long vaddr = addr & PAGE_MASK;
		__flush_dcache_page(vaddr);
		__invalidate_icache_page(vaddr);
		set_bit(PG_arch_1, &page->flags);
	}
#endif
}

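/*
 * Taken together with flush_dcache_page() above: on an aliasing write-back
 * D-cache, flush_dcache_page() may only set PG_arch_1 and defer the actual
 * flush; update_mmu_cache() then performs it when the page is mapped into
 * user space and clears the bit. On non-aliasing configurations the bit is
 * used in the opposite ('clean') sense: update_mmu_cache() synchronizes I$
 * and D$ once for executable pages and sets PG_arch_1 so the work is not
 * repeated.
 */
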
/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */
	if (alias) {
		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(temp, phys);
	}

	/* Copy data */
	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */
	if (alias) {
		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_range((unsigned long)dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(temp, phys);

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long)dst, len);
		__invalidate_icache_range((unsigned long)dst, len);
	}
}

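/*
 * copy_to_user_page() is reached through access_process_vm() (for example,
 * ptrace writing into another task's text), which is why it must both handle
 * a possible D-cache alias of the target page and resynchronize the I-cache
 * when the VMA is executable.
 */
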
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient.)
	 */
	if (alias) {
		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(temp, phys);
	}

	memcpy(dst, src, len);
}

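/*
 * Unlike copy_to_user_page(), this path only reads from the user page, so
 * writing back the aliased user lines is all that is needed for the
 * kernel-side read to see current data; as the parenthetical above notes, a
 * plain flush (without the invalidate) would already suffice, and no I-cache
 * maintenance is required.
 */
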
#endif	/* (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK */