arch/arm/mm/copypage-v4mc.c (Linux 5.7.7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/copypage-armv4mc.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

static DEFINE_RAW_SPINLOCK(minicache_lock);
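
/*
 * Illustration (a sketch, not part of the original file): the source
 * page is temporarily mapped at a fixed kernel address with the
 * mini-cache memory type, so reads through that alias are cached in
 * the mini data cache instead of the main D-cache.  The real sequence
 * lives in v4_mc_copy_user_highpage() below:
 *
 *	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
 *	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 */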

/*
 * ARMv4 mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
static void mc_copy_user_page(void *from, void *to)
{
	int tmp;

	asm volatile ("\
	.syntax unified\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	%2, %2, #1			@ 1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmiane	%0!, {r2, r3, ip, lr}		@ 4\n\
	bne	1b				@ "
	: "+&r" (from), "+&r" (to), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64)
	: "r2", "r3", "ip", "lr");
}

void v4_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	raw_spin_lock(&minicache_lock);

	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}
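
/*
 * Usage sketch (assumed wiring, not code from this file): on
 * MULTI_USER kernels, asm/page.h routes the generic helper through the
 * global cpu_user table, so a call such as
 *
 *	copy_user_highpage(to, from, vaddr, vma);
 *
 * reaches cpu_user.cpu_copy_user_highpage, i.e. the function above
 * once v4_mc_user_fns has been installed for this CPU.
 */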

/*
 * ARMv4 optimised clear_user_page
 */
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}
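
/*
 * Again for reference only, a plain-C equivalent of the clearing loop
 * (v4_mc_clear_user_page_c is illustrative, not part of the original
 * file).  The assembly wins by invalidating each destination D-cache
 * line before storing to it, so zeroing never pulls stale lines in.
 */
static inline void __maybe_unused v4_mc_clear_user_page_c(void *kaddr)
{
	u32 *p = kaddr;
	unsigned int i;

	/* Zero the whole page one 32-bit word at a time. */
	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
		*p++ = 0;
}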

struct cpu_user_fns v4_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
	.cpu_copy_user_highpage = v4_mc_copy_user_highpage,
};
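
/*
 * How this table gets used (a sketch based on the MULTI_USER dispatch,
 * not code from this file): the matched proc_info_list entry for a
 * mini-data-cache CPU such as the SA1100 carries a pointer to
 * v4_mc_user_fns, and processor setup copies it into the global table,
 * roughly:
 *
 *	cpu_user = *list->user;
 *
 * after which copy_user_highpage() and clear_user_highpage() land in
 * the routines above.
 */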