/*
 *  linux/arch/arm/lib/copypage-armv4mc.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define minicache_pgprot	__pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
					 L_PTE_CACHEABLE)

static DEFINE_SPINLOCK(minicache_lock);
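
/*
 * minicache_pgprot sets the cacheable bit but not the bufferable one;
 * on SA11x0-class CPUs a C=1/B=0 mapping is serviced by the mini data
 * cache rather than the main one.  TOP_PTE() used below is assumed to
 * come from the local "mm.h" header, along the lines of:
 *
 *	#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
 *
 * i.e. it yields the PTE slot covering an address in the reserved
 * 0xffff8000+ window.
 */
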
/*
 * ARMv4 mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
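/*
 * Loop accounting, assuming 4 KiB pages and 32-byte D-cache lines (the
 * SA11x0 case): each pass of the loop below moves four 16-byte
 * ldmia/stmia pairs, i.e. 64 bytes, so it runs PAGE_SIZE / 64 = 64
 * times, and "invalidate D line" is issued once per 32-byte line
 * (twice per pass) just before that line is overwritten.
 */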
static void __attribute__((naked))
mc_copy_user_page(void *from, void *to)
{
	asm volatile(
	"stmfd	sp!, {r4, lr}			@ 2\n\
	mov	r4, %2				@ 1\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r4, r4, #1			@ 1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmneia	%0!, {r2, r3, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	ldmfd	sp!, {r4, pc}			@ 3"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
}
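
#if 0
/*
 * Purely illustrative C equivalent of the copy loop above (a sketch,
 * never built: plain C stores cannot express the per-line "invalidate
 * D line" mcr that keeps destination lines out of the main D-cache).
 */
static void mc_copy_user_page_c(void *from, void *to)
{
	unsigned long *src = from, *dst = to;
	unsigned int i, j;

	for (i = 0; i < PAGE_SIZE / 64; i++)	/* 64 bytes per pass */
		for (j = 0; j < 16; j++)	/* 16 words of 4 bytes */
			*dst++ = *src++;
}
#endif
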
void v4_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr)
{
	void *kto = kmap_atomic(to, KM_USER1);

	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	spin_lock(&minicache_lock);

	/*
	 * Map the source page at the minicache window, and make sure no
	 * stale TLB entry for the window address survives.
	 */
	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
	flush_tlb_kernel_page(0xffff8000);

	mc_copy_user_page((void *)0xffff8000, kto);

	spin_unlock(&minicache_lock);

	kunmap_atomic(kto, KM_USER1);
}

/*
 * ARMv4 optimised clear_user_page
 */
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);

	/*
	 * "ptr" exists only so the asm can advance the pointer register
	 * in place (the "=r" output is tied to the "0" input), leaving
	 * kaddr itself intact for kunmap_atomic() below.
	 */
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr, KM_USER0);
}

struct cpu_user_fns v4_mc_user_fns __initdata = {
	.cpu_clear_user_highpage	= v4_mc_clear_user_highpage,
	.cpu_copy_user_highpage		= v4_mc_copy_user_highpage,
};
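
/*
 * Usage sketch: on MULTI_USER kernels of this era the per-CPU vector
 * is expected to be consumed roughly as follows (names assumed from
 * the ARM <asm/page.h> of the time, not defined in this file):
 *
 *	extern struct cpu_user_fns cpu_user;
 *	#define copy_user_highpage(to, from, vaddr, vma) \
 *		cpu_user.cpu_copy_user_highpage(to, from, vaddr)
 *	#define clear_user_highpage(page, vaddr) \
 *		cpu_user.cpu_clear_user_highpage(page, vaddr)
 */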