Linux 5.7.6
arch/csky/mm/tlb.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/setup.h>

/*
 * One C-SKY MMU TLB entry covers two PFN/page entries, i.e.:
 * 1 VPN -> 2 PFN
 */
#define TLB_ENTRY_SIZE		(PAGE_SIZE * 2)
#define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)
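
/*
 * Worked example (assuming a 4 KiB PAGE_SIZE; it is configurable):
 * TLB_ENTRY_SIZE is then 8 KiB and TLB_ENTRY_SIZE_MASK clears the low
 * 13 bits, so the flush routines below round start down and end up to
 * an 8 KiB boundary, i.e. to whole one-VPN/two-PFN TLB entries.
 */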

void flush_tlb_all(void)
{
	tlb_invalid_all();
}
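
/*
 * flush_tlb_mm() - drop the TLB entries belonging to one address space.
 * With hardware TLBI, only entries tagged with @mm's ASID are invalidated
 * (tlbi.asids); otherwise the whole TLB is flushed.
 */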
void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
#else
	tlb_invalid_all();
#endif
}

/*
 * The MMU operation registers can only invalidate entries in the jTLB, so
 * we must change the ASID field to also invalidate the I-uTLB and D-uTLB.
 * restore_asid_inv_utlb() writes the old ASID back into EntryHi; when the
 * old and new ASIDs are equal it writes a different value first so that
 * the final write still changes the ASID field.
 */
#ifndef CONFIG_CPU_HAS_TLBI
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if (oldpid == newpid) \
		write_mmu_entryhi(oldpid + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)
#endif
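
/*
 * flush_tlb_range() - invalidate user mappings in [start, end) for @vma's
 * address space. The range is first widened to TLB-entry (two page)
 * alignment; with hardware TLBI each entry is invalidated by virtual
 * address plus ASID, otherwise the jTLB is probed and invalidated by
 * index with local IRQs disabled.
 */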
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long newpid = cpu_asid(vma->vm_mm);

	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	while (start < end) {
		asm volatile("tlbi.vas %0"::"r"(start | newpid));
		start += 2*PAGE_SIZE;
	}
	sync_is();
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | newpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}
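
/*
 * flush_tlb_kernel_range() - same walk as flush_tlb_range(), but for
 * kernel addresses: the TLBI path uses tlbi.vaas with no ASID or'd in,
 * and the probe path keeps the current ASID in EntryHi.
 */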
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	while (start < end) {
		asm volatile("tlbi.vaas %0"::"r"(start));
		start += 2*PAGE_SIZE;
	}
	sync_is();
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | oldpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}
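
/*
 * flush_tlb_page() - invalidate the single TLB entry covering @addr in
 * @vma's address space.
 */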
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	int newpid = cpu_asid(vma->vm_mm);

	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.vas %0"::"r"(addr | newpid));
	sync_is();
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | newpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}
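
/*
 * flush_tlb_one() - invalidate the TLB entry covering a single kernel
 * address, regardless of ASID. Exported for use by modules.
 */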
void flush_tlb_one(unsigned long addr)
{
	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.vaas %0"::"r"(addr));
	sync_is();
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | oldpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}
EXPORT_SYMBOL(flush_tlb_one);
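
/*
 * Usage sketch (illustrative, not part of this file): generic mm code
 * typically calls flush_tlb_page() after changing a user PTE, e.g.
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 *	flush_tlb_page(vma, addr);
 *
 * while flush_tlb_one() is the single-address counterpart for kernel
 * mappings, e.g. after updating a vmalloc or fixmap PTE.
 */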