/*
 *  linux/arch/arm/mm/copypage-xscale.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"
/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define COPYPAGE_MINICACHE	0xffff8000
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_CACHEABLE)
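/*
 * Note: the window is mapped with C=1, B=0 (L_PTE_CACHEABLE only).
 * On XScale this is the Linux PTE encoding that the processor support
 * code translates into the hardware "cacheable in mini data cache"
 * attribute; see the pte handling in proc-xscale.S for the details.
 */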
static DEFINE_SPINLOCK(minicache_lock);
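/*
 * minicache_lock serialises users of COPYPAGE_MINICACHE: there is only
 * one minicache window, so only one copy may have the source page
 * mapped there at a time.
 */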
/*
 * XScale mini-dcache optimised copy_user_page
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void __attribute__((naked))
mc_copy_user_page(void *from, void *to)
{
	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.	(NP)
	 */
	asm volatile(
	"stmfd	sp!, {r4, r5, lr}		\n\
	mov	lr, %2				\n\
	pld	[r0, #0]			\n\
	pld	[r0, #32]			\n\
	pld	[r1, #0]			\n\
	pld	[r1, #32]			\n\
1:	pld	[r0, #64]			\n\
	pld	[r0, #96]			\n\
	pld	[r1, #64]			\n\
	pld	[r1, #96]			\n\
2:	ldrd	r2, [r0], #8			\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, [r0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	lr, lr, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b				\n\
	beq	2b				\n\
	ldmfd	sp!, {r4, r5, pc}		"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}
void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
	struct page *page = virt_to_page(kfrom);

	/*
	 * Write back any dirty cache lines for the source page first, so
	 * that reads through the minicache alias see up-to-date data.
	 */
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_dcache_page(page_mapping(page), page);

	spin_lock(&minicache_lock);

	/* Point the minicache window at the source page and drop any stale TLB entry. */
	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
	flush_tlb_kernel_page(COPYPAGE_MINICACHE);

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	spin_unlock(&minicache_lock);
}
/*
 * XScale optimised clear_user_page
 */
void __attribute__((naked))
xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
{
	/*
	 * Zero the page 32 bytes (one D-cache line) per iteration, cleaning
	 * and invalidating each line behind the writes so the page does not
	 * stay resident in the D-cache.
	 */
	asm volatile(
	"mov	r1, %0				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mov	ip, r0				\n\
	strd	r2, [r0], #8			\n\
	strd	r2, [r0], #8			\n\
	strd	r2, [r0], #8			\n\
	strd	r2, [r0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b				\n\
	mov	pc, lr"
	:
	: "I" (PAGE_SIZE / 32));
}
struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_page	= xscale_mc_clear_user_page,
	.cpu_copy_user_page	= xscale_mc_copy_user_page,
};
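/*
 * These hooks are not called directly.  The XScale proc_info entry
 * carries a pointer to this structure, and the generic copy_user_page()
 * and clear_user_page() paths dispatch through it (via cpu_user when
 * MULTI_USER is enabled; see asm/page.h).
 */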