// SPDX-License-Identifier: GPL-2.0
/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 * Copyright (C) 2002 Ralf Baechle
 * Copyright (C) 2002 Maciej W. Rozycki
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/tlbmisc.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/setup.h>
#include <asm/tlbex.h>

#undef DEBUG_TLB

/* CP0 hazard avoidance. */
#define BARRIER				\
	__asm__ __volatile__(		\
		".set push\n\t"		\
		".set noreorder\n\t"	\
		"nop\n\t"		\
		".set pop\n\t")

/* TLB operations. */
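/*
 * Invalidate all TLB entries from @entry to the end of the TLB by
 * loading each slot with a mapping that can never match: EntryLo0 is
 * zero (not valid) and the VPN is pushed up into KSEG0, which is never
 * translated through the TLB.  On the R3000 the Index register takes
 * the slot number in bits 8..13, hence the "entry << 8".
 */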
static void local_flush_tlb_from(int entry)
{
	unsigned long old_ctx;

	old_ctx = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
	write_c0_entrylo0(0);
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry << 8);
		write_c0_entryhi((entry | 0x80000) << 12);
		entry++;				/* BARRIER */
		tlb_write_indexed();
	}
	write_c0_entryhi(old_ctx);
}
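
/*
 * Flush everything except the first eight TLB entries, which are
 * reserved for wired mappings set up by add_wired_entry() below.
 */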
void local_flush_tlb_all(void)
{
	unsigned long flags;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif
	local_irq_save(flags);
	local_flush_tlb_from(8);
	local_irq_restore(flags);
}
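
/*
 * Flush a range of user addresses for @vma's mm.  If the range covers
 * more pages than the TLB has entries, the whole MMU context is dropped
 * instead (forcing a new ASID on next use) rather than probing every
 * page individually.
 */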
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

#ifdef DEBUG_TLB
		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
			cpu_context(cpu, mm) & asid_mask, start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= current_cpu_data.tlbsize) {
			int oldpid = read_c0_entryhi() & asid_mask;
			int newpid = cpu_context(cpu, mm) & asid_mask;

			start &= PAGE_MASK;
			end += PAGE_SIZE - 1;
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += PAGE_SIZE;	/* BARRIER */
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entryhi(KSEG0);
				if (idx < 0)		/* BARRIER */
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm);
		}
		local_irq_restore(flags);
	}
}
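
/*
 * Flush a range of kernel addresses.  Same probe-and-invalidate loop as
 * above, but driven by the bare virtual address; the current EntryHi
 * (ASID) is saved and restored around the loop, and oversized ranges
 * fall back to local_flush_tlb_all().
 */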
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

#ifdef DEBUG_TLB
	printk("[tlbrange<0x%08lx,0x%08lx>]", start, end);
#endif
	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size <= current_cpu_data.tlbsize) {
		int pid = read_c0_entryhi();

		start &= PAGE_MASK;
		end += PAGE_SIZE - 1;
		end &= PAGE_MASK;

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += PAGE_SIZE;	/* BARRIER */
			tlb_probe();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entryhi(KSEG0);
			if (idx < 0)		/* BARRIER */
				continue;
			tlb_write_indexed();
		}
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}
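
/*
 * Flush a single user page: probe for it under the mm's ASID and, if a
 * matching entry exists, overwrite it with an invalid KSEG0 mapping.
 */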
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

#ifdef DEBUG_TLB
		printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
		newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
		page &= PAGE_MASK;
		local_irq_save(flags);
		oldpid = read_c0_entryhi() & asid_mask;
		write_c0_entryhi(page | newpid);
		BARRIER;
		tlb_probe();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entryhi(KSEG0);
		if (idx < 0)			/* BARRIER */
			goto finish;
		tlb_write_indexed();

finish:
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}
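
/*
 * Preload the TLB with a freshly established PTE (reached via
 * update_mmu_cache()).  If the address is already present the entry is
 * rewritten in place, otherwise a random slot is used.
 */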
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
	unsigned long flags;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & asid_mask;

#ifdef DEBUG_TLB
	if ((pid != (cpu_context(smp_processor_id(), vma->vm_mm) & asid_mask)) ||
	    (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
		       cpu_context(smp_processor_id(), vma->vm_mm), pid);
	}
#endif

	local_irq_save(flags);
	address &= PAGE_MASK;
	write_c0_entryhi(address | pid);
	BARRIER;
	tlb_probe();
	idx = read_c0_index();
	write_c0_entrylo0(pte_val(pte));
	write_c0_entryhi(address | pid);
	if (idx < 0) {				/* BARRIER */
		tlb_write_random();
	} else {
		tlb_write_indexed();
	}
	write_c0_entryhi(pid);
	local_irq_restore(flags);
}
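
/*
 * Install a permanent ("wired") mapping in one of the first eight TLB
 * slots, which local_flush_tlb_all() leaves alone.  The R3000 TLB maps
 * a single 4KB page per entry, so entrylo1 and pagemask exist only for
 * interface compatibility and are ignored here.
 */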
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
	unsigned long flags;
	unsigned long old_ctx;
	static unsigned long wired = 0;

	if (wired < 8) {
#ifdef DEBUG_TLB
		printk("[tlbwired<entry lo0 %8lx, hi %8lx>]\n",
		       entrylo0, entryhi);
#endif

		local_irq_save(flags);
		old_ctx = read_c0_entryhi() & asid_mask;
		write_c0_entrylo0(entrylo0);
		write_c0_entryhi(entryhi);
		write_c0_index(wired);
		wired++;				/* BARRIER */
		tlb_write_indexed();
		write_c0_entryhi(old_ctx);
		local_flush_tlb_all();
		local_irq_restore(flags);
	}
}
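
/*
 * Boot-time setup: wipe the entire TLB, wired slots included, and
 * install the TLB refill handler.
 */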
void tlb_init(void)
{
	local_flush_tlb_from(0);
	build_tlb_refill_handler();
}