/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbdebug.h>

/*
 * Temporarily undefine CONFIG_MIPS_MT so <asm/r4kcache.h> provides the
 * non-MT variants of the cache routines, then restore the definition.
 */
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);

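/*
 * Return the host ASID backing the guest kernel / guest user address
 * space on the current CPU. The stored value is masked with
 * cpu_asid_mask() since the width of the ASID field can differ between
 * CPUs.
 */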
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

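/*
 * Note: despite the name, this returns the index of the TLB entry
 * reserved for the commpage mapping, not an ASID; callers feed it to
 * write_c0_index() before an indexed TLB write.
 */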
inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long flags;

	local_irq_save(flags);

	kvm_info("HOST TLBs:\n");
	dump_tlb_regs();
	pr_info("\n");
	dump_tlb_all();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
							? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
			 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
			 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
			 tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

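/*
 * Write a single TLB entry into the host TLB. The host TLB is probed
 * for an entry matching entryhi: on a hit the entry is overwritten in
 * place, on a miss a random victim is replaced.
 */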
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx > current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	/* On a probe hit overwrite the matching entry, else pick a victim */
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush the D-cache for any page half that is now valid */
	if (flush_dcache_mask) {
		if (entrylo0 & ENTRYLO_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & ENTRYLO_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write);

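/*
 * Map the vcpu's KSEG0 commpage at the guest address that faulted,
 * using the TLB entry reserved for the commpage. Only the EntryLo
 * half selected by bit PAGE_SHIFT of badvaddr is marked valid; the
 * other half of the even/odd pair stays invalid.
 */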
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo[2] = { 0, 0 };
	unsigned int pair_idx;

	pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
	pair_idx = (badvaddr >> PAGE_SHIFT) & 1;
	entrylo[pair_idx] = mips3_paddr_to_tlbpfn(pfn << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	write_c0_entrylo0(entrylo[0]);
	write_c0_entrylo1(entrylo[1]);
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);

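/*
 * Linear search of the guest's software-managed TLB for an entry whose
 * VPN2 and ASID match entryhi. Returns the matching index, or -1 if
 * there is no match.
 */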
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int index = -1;
	int i;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference the entry on a hit; i is out of range on a miss */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo[0], tlb[index].tlb_lo[1]);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

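/*
 * Probe the host TLB for the guest virtual address vaddr under the
 * ASID appropriate to the guest's current mode. Returns the probed
 * index, which is negative if the address is not mapped.
 */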
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);

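/*
 * Invalidate any host TLB entry mapping guest user address va by
 * probing under the guest user ASID and, on a hit, rewriting the
 * entry with a unique EntryHi and zeroed EntryLo values.
 */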
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* Index 0 is a valid entry, so invalidate on any non-negative hit */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu),
			  idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

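/*
 * Invalidate entries in the host TLB by rewriting each index with a
 * unique EntryHi and zeroed EntryLo values. With skip_kseg0 set,
 * entries mapping guest KSEG0 (guest kernel) addresses are preserved.
 */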
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);

		if (skip_kseg0) {
			mtc0_tlbr_hazard();
			tlb_read();
			tlb_read_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;

			write_c0_pagemask(old_pagemask);
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);

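/*
 * Flush the entire TLB of the local CPU. Unlike
 * kvm_mips_flush_host_tlb() there is no option to preserve guest
 * kernel entries.
 */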
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);