/*
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
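/*
 * The SH-4 D-cache is virtually indexed, so two virtual addresses that
 * differ in the bits covered by alias_mask can land in different cache
 * sets while referring to the same physical line. As an illustration
 * (not a value taken from this file), a 16KB direct-mapped D-cache with
 * 4KB pages would give an alias_mask of 0x3000: bits 12-13 of the
 * virtual address select the alias.
 */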
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags;
        pte_t pte;

        inc_preempt_count();

        /*
         * Pick the fixmap slot whose virtual address has the same cache
         * alias as the user address, so the kernel mapping hits the same
         * D-cache lines as the user mapping of the page.
         */
        idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, PAGE_KERNEL);

        local_irq_save(flags);
        flush_tlb_one(get_asid(), vaddr);
        local_irq_restore(flags);

        update_mmu_cache(NULL, vaddr, pte);

        return (void *)vaddr;
}
static inline void kunmap_coherent(struct page *page)
{
        /* Nothing to unmap; just drop the preempt disable from kmap_coherent() */
        dec_preempt_count();
        preempt_check_resched();
}
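/*
 * Sketch of the intended usage of the pair above (illustrative only,
 * not code from this file); the mapping is valid only between the two
 * calls, and the caller must not sleep in between since kmap_coherent()
 * raises the preempt count:
 *
 *	void *p = kmap_coherent(page, user_addr);
 *	memcpy(p, src, len);
 *	kunmap_coherent(p);
 */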
/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
        __set_bit(PG_mapped, &page->flags);

        clear_page(to);
        /* Write back if the kernel alias differs from the user alias */
        if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
                __flush_wback_region(to, PAGE_SIZE);
}
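/*
 * Note that 'to' in clear_user_page() above is a P1 address: the SH
 * cached, identity-mapped kernel segment. It can be dereferenced
 * without a TLB entry, so only the cache alias needs handling.
 */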
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        void *vto;

        __set_bit(PG_mapped, &page->flags);

        vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
        memcpy(vto, src, len);
        kunmap_coherent(vto);

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}
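/*
 * The VM_EXEC check above matters for callers like ptrace writing
 * breakpoints into a text page: the flush keeps the instruction cache
 * from fetching stale bytes.
 */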
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        void *vfrom;

        __set_bit(PG_mapped, &page->flags);

        vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
        memcpy(dst, vfrom, len);
        kunmap_coherent(vfrom);
}
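/*
 * In copy_user_highpage() below, the source is read through
 * kmap_coherent() so loads see the user alias's cache lines, while the
 * destination is a fresh page with no user mapping yet, so an ordinary
 * kmap_atomic() mapping suffices; the write-back after the copy handles
 * a kernel alias that differs from the user's.
 */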
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        __set_bit(PG_mapped, &to->flags);

        vto = kmap_atomic(to, KM_USER1);
        vfrom = kmap_coherent(from, vaddr);
        copy_page(vto, vfrom);
        kunmap_coherent(vfrom);

        if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
                __flush_wback_region(vto, PAGE_SIZE);

        kunmap_atomic(vto, KM_USER1);
        /* Make sure this page is cleared on other CPUs too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */
inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;

        pte_clear(mm, addr, ptep);
        if (!pte_not_present(pte)) {
                unsigned long pfn = pte_pfn(pte);
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);
                        struct address_space *mapping = page_mapping(page);
                        if (!mapping || !mapping_writably_mapped(mapping))
                                __clear_bit(PG_mapped, &page->flags);
                }
        }
        return pte;
}
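/*
 * PG_mapped bookkeeping, in brief: the clear/copy helpers above set the
 * bit once a page's kernel-side cache lines are coherent with its user
 * mapping, and ptep_get_and_clear() drops it when a user mapping goes
 * away (unless the page may still be writably mapped through an address
 * space). Elsewhere in the SH code, update_mmu_cache() is expected to
 * test this bit and write back the P1 alias before installing a user
 * translation for a page that lacks it.
 */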