/*
 *  linux/arch/arm/mm/copypage-xscale.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"
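
/*
 * PTE attributes for the transient source mapping: L_PTE_MT_MINICACHE
 * selects the XScale mini data cache memory type, so reads through the
 * mapping allocate in the mini dcache instead of the main dcache.
 */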
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)
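
/* Serialises use of the single COPYPAGE_MINICACHE mapping window. */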
static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void __naked
mc_copy_user_page(void *from, void *to)
{
	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.	(NP)
	 */
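	/*
	 * lr counts PAGE_SIZE / 64 - 1 passes: each pass over the "2:"
	 * block copies 64 bytes as two 32-byte halves, with ip recording
	 * the start of each destination line so it can be cleaned and
	 * invalidated once written, keeping stale copies out of the main
	 * dcache.  "bgt 1b" loops through the counted passes with
	 * prefetch; the final pass takes "beq 2b" without prefetching,
	 * for 64 passes x 64 bytes = 4096 bytes on a 4K page.
	 */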
	asm volatile(
	"stmfd	sp!, {r4, r5, lr}		\n\
	mov	lr, %2				\n\
	pld	[r0, #0]			\n\
	pld	[r0, #32]			\n\
	pld	[r1, #0]			\n\
	pld	[r1, #32]			\n\
1:	pld	[r0, #64]			\n\
	pld	[r0, #96]			\n\
	pld	[r1, #64]			\n\
	pld	[r1, #96]			\n\
2:	ldrd	r2, [r0], #8			\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, [r0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	lr, lr, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b				\n\
	beq	2b				\n\
	ldmfd	sp!, {r4, r5, pc}		"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	raw_spin_lock(&minicache_lock);
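
	/*
	 * Map the source page at the fixed COPYPAGE_MINICACHE window with
	 * the minicache attributes, so the reads in mc_copy_user_page()
	 * allocate in the mini data cache rather than the main dcache.
	 */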
	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}

/*
 * XScale optimised clear_user_page
 */
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
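
	/*
	 * r1 counts PAGE_SIZE / 32 lines; each pass zeroes one 32-byte
	 * line with four strd stores of the r2/r3 zero pair, then cleans
	 * and invalidates that line so the zeros reach memory without
	 * staying resident in the dcache.
	 */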
	asm volatile(
	"mov	r1, %2				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mov	ip, %0				\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");
	kunmap_atomic(kaddr);
}
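
/*
 * Boot-time template only: setup_processor() copies the selected CPU's
 * user-page helpers out of its proc_info entry into the live
 * cpu_user_fns, which is why this table can live in __initdata.
 */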
struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
};