/*
 * arch/sh/mm/pg-sh7705.c
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2004  Alex Song
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
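
/*
 * __flush_purge_virtual_region
 * @p1: P1 (cached) kernel address of the page backing @virt
 * @virt: virtual address whose cache lines must be purged
 * @size: length of the region in bytes
 *
 * Invalidate any dcache line, in any way, whose tag matches the
 * physical page behind @p1 at the sets indexed by @virt.  The
 * memory-mapped address array is probed directly, so no TLB entry
 * for @virt is required.
 */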
static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long p1_begin;

	begin = L1_CACHE_ALIGN((unsigned long)virt);
	end = L1_CACHE_ALIGN((unsigned long)virt + size);

	p1_begin = (unsigned long)p1 & ~(L1_CACHE_BYTES - 1);
	/* do this the slow way as we may not have TLB entries
	 * for virt yet. */
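
	/*
	 * Outer loop: walk the region one cache line at a time.
	 * Inner loop: for each line, scan every dcache way at the set
	 * selected by the alias address and invalidate matching entries.
	 */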
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		unsigned long p;
		unsigned long ways, addr;

		p = __pa(p1_begin);

		ways = current_cpu_data.dcache.ways;
		addr = CACHE_OC_ADDRESS_ARRAY;
		do {
			unsigned long data;

			addr |= (v & current_cpu_data.dcache.entry_mask);
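
			/*
			 * The address-array read returns the line's tag
			 * along with its valid/updated state bits.
			 */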
			data = ctrl_inl(addr);
			if ((data & CACHE_PHYSADDR_MASK) ==
			    (p & CACHE_PHYSADDR_MASK)) {
				/* same physical page: invalidate this line */
				data &= ~(SH_CACHE_UPDATED|SH_CACHE_VALID);
				ctrl_outl(data, addr);
			}

			addr += current_cpu_data.dcache.way_incr;
		} while (--ways);

		p1_begin += L1_CACHE_BYTES;
	}
}
/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 */
void clear_user_page(void *to, unsigned long address, struct page *pg)
{
	struct page *page = virt_to_page(to);

	__set_bit(PG_mapped, &page->flags);
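
	/*
	 * If the kernel (P1) and user (U0) addresses fall on the same
	 * cache alias, clearing through @to leaves the cache coherent;
	 * otherwise any lines for this page at the user's alias must be
	 * purged first.
	 */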
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
		clear_page(to);
		__flush_wback_region(to, PAGE_SIZE);
	} else {
		__flush_purge_virtual_region(to,
					     (void *)(address & 0xfffff000),
					     PAGE_SIZE);
		clear_page(to);
		__flush_wback_region(to, PAGE_SIZE);
	}
}
/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 */
void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
{
	struct page *page = virt_to_page(to);

	__set_bit(PG_mapped, &page->flags);
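
	/* Same aliasing rule as clear_user_page() above. */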
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
		copy_page(to, from);
		__flush_wback_region(to, PAGE_SIZE);
	} else {
		__flush_purge_virtual_region(to,
					     (void *)(address & 0xfffff000),
					     PAGE_SIZE);
		copy_page(to, from);
		__flush_wback_region(to, PAGE_SIZE);
	}
}
/*
 * For SH7705, we have our own implementation of ptep_get_and_clear.
 * Copied from pg-sh4.c.
 */
inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);
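
	/*
	 * clear_user_page()/copy_user_page() set PG_mapped above.  Once
	 * the PTE is gone and no writable shared mapping remains that
	 * could dirty the page, the flag can safely be dropped.
	 */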
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);
			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}

	return pte;
}