arch/csky/mm/tlb.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/setup.h>

/*
 * One C-SKY MMU TLB entry contains two PFN/page entries, i.e.:
 * 1 VPN -> 2 PFNs
 */
#define TLB_ENTRY_SIZE		(PAGE_SIZE * 2)
#define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)

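/*
 * Illustrative note (not in the original source): with 4 KiB pages,
 * TLB_ENTRY_SIZE is 0x2000 and TLB_ENTRY_SIZE_MASK clears the low 13
 * bits, so the flush routines below widen a range to the enclosing
 * 8 KiB-aligned window and then invalidate one entry per 2*PAGE_SIZE:
 *
 *	start &= TLB_ENTRY_SIZE_MASK;		// round down to entry
 *	end    = (end + TLB_ENTRY_SIZE - 1)
 *			& TLB_ENTRY_SIZE_MASK;	// round up to entry
 */
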
void flush_tlb_all(void)
{
	tlb_invalid_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
#else
	tlb_invalid_all();
#endif
}

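/*
 * Illustrative note (not in the original source): on CPUs with the TLBI
 * extension, "tlbi.asids" above drops every TLB line tagged with the
 * mm's ASID in one shot; the fallback path has no per-ASID operation,
 * so it must invalidate the whole jTLB instead.
 */
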
/*
 * The MMU operation registers can only invalidate TLB entries in the
 * jTLB, so we must change the ASID field to invalidate the I-uTLB &
 * D-uTLB as well.
 */
#ifndef CONFIG_CPU_HAS_TLBI
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if (oldpid == newpid) \
		write_mmu_entryhi(oldpid + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)
#endif

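/*
 * Illustrative note (not in the original source): per the comment
 * above, it is the *change* of the ASID field in entryhi that forces
 * the micro TLBs to drop their cached translations. When the flush
 * loop ends with oldpid == newpid, restoring oldpid alone would not
 * change the field, so the macro first writes oldpid + 1 to guarantee
 * a transition, then puts oldpid back:
 *
 *	restore_asid_inv_utlb(5, 5);	// writes entryhi = 6, then 5
 *	restore_asid_inv_utlb(5, 9);	// field already differs; just
 *					// writes entryhi = 5 back
 */
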
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long newpid = cpu_asid(vma->vm_mm);

	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	while (start < end) {
		asm volatile("tlbi.vas %0"::"r"(start | newpid));
		start += 2*PAGE_SIZE;
	}
	sync_is();
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | newpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	while (start < end) {
		asm volatile("tlbi.vaas %0"::"r"(start));
		start += 2*PAGE_SIZE;
	}
	sync_is();
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | oldpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	int newpid = cpu_asid(vma->vm_mm);

	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.vas %0"::"r"(addr | newpid));
	sync_is();
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | newpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

void flush_tlb_one(unsigned long addr)
{
	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.vaas %0"::"r"(addr));
	sync_is();
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | oldpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}

EXPORT_SYMBOL(flush_tlb_one);
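
/*
 * Illustrative usage sketch (not in the original source; the helper
 * name is hypothetical): a caller that rewrites a single kernel PTE
 * would drop the stale translation with flush_tlb_one():
 *
 *	static void remap_kernel_page(pte_t *ptep, unsigned long vaddr,
 *				      pte_t pte)
 *	{
 *		set_pte(ptep, pte);
 *		flush_tlb_one(vaddr);	// evict the old VPN->PFN pair
 *	}
 */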