arch/mips/kvm/kvm_tlb.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling: this file is part of the Linux host kernel, so that
 * the TLB handlers run from KSEG0.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

#define PRIx64 "llx"
/*
 * Invalidate TLB entries by loading each index with a unique, never-matched
 * KSEG0 VPN2 (a VZ-capable core could use EntryHi.EHINV instead).
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
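/*
 * For example, with 4K pages (PAGE_SHIFT == 12):
 *
 *	UNIQUE_ENTRYHI(0) == CKSEG0 + 0x0000
 *	UNIQUE_ENTRYHI(1) == CKSEG0 + 0x2000
 *
 * so each TLB index gets its own 8K-aligned VPN2 and a probe can never
 * match two entries.
 */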
atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

/*
 * Note: despite the name, this returns the TLB index reserved for the
 * commpage mapping (it is written to CP0_Index in
 * kvm_mips_handle_commpage_tlb_fault()), not an ASID.
 */
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}
/*
 * Structure defining a TLB entry data set.
 */

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	printk("HOST TLBs:\n");
	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	printk("Guest TLBs:\n");
	printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}

void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
{
	int i;
	volatile struct kvm_mips_tlb tlb;

	printk("Shadow TLBs:\n");
	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}

static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}
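/*
 * Typical call pattern (a minimal sketch, mirroring
 * kvm_mips_handle_kseg0_tlb_fault() below):
 *
 *	if (kvm_mips_map_page(kvm, gfn) < 0)
 *		return -1;
 *	pfn = kvm->arch.guest_pmap[gfn];
 */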
/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
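/*
 * Illustrative use (as in kvm_get_inst() below): the returned HPA keeps
 * the page offset of the GVA, so the host can dereference it via KSEG0:
 *
 *	paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, gva);
 *	inst = *(uint32_t *) CKSEG0ADDR(paddr);
 */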
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	volatile int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		/* Don't leak the saved EntryHi or leave IRQs disabled */
		write_c0_entryhi(old_entryhi);
		mtc0_tlbw_hazard();
		local_irq_restore(flags);
		return -1;
	}

	if (idx < 0) {
		idx = read_c0_random() % current_cpu_data.tlbsize;
		write_c0_index(idx);
		mtc0_tlbw_hazard();
	}
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	tlbw_use_hazard();

#ifdef DEBUG
	if (debug) {
		kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
			  "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
			  vcpu->arch.pc, idx, read_c0_entryhi(),
			  read_c0_entrylo0(), read_c0_entrylo1());
	}
#endif

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}
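/*
 * Callers build the entryhi argument from the faulting VA, rounded down
 * to an even/odd page pair, plus the live guest ASID; a minimal sketch,
 * mirroring kvm_mips_handle_kseg0_tlb_fault():
 *
 *	vaddr = badvaddr & (PAGE_MASK << 1);
 *	entryhi = vaddr | kvm_mips_get_kernel_asid(vcpu);
 */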
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	/* EntryLo bits: C=3 (cacheable, noncoherent), D (writable), V (valid) */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       flush_dcache_mask);
}
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

#ifdef DEBUG
	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());
#endif

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	pfn_t pfn0, pfn1;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
				      >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
				      >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
			kvm_mips_get_kernel_asid(vcpu) :
			kvm_mips_get_user_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

#ifdef DEBUG
	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);
#endif

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       tlb->tlb_mask);
}
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
		     ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
		    (TLB_IS_GLOBAL(tlb[i]) ||
		     (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
			index = i;
			break;
		}
	}

#ifdef DEBUG
	/* Only dereference the TLB array on a hit; i is out of range here */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index, tlb[index].tlb_lo0,
			  tlb[index].tlb_lo1);
#endif

	return index;
}
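/*
 * Example: with 4K pages (tlb_mask == 0), a guest entry whose tlb_hi is
 * 0x40000005 (VPN2 covering 0x40000000-0x40001fff, ASID 5) matches
 * entryhi == 0x40000005, or any ASID at all if the entry is global;
 * a non-zero tlb_mask widens the VPN2 comparison for larger pages.
 */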
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	volatile int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

#ifdef DEBUG
	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
#endif

	return idx;
}
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* A probe miss yields a negative index; entry 0 is a valid hit */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

#ifdef DEBUG
	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu),
			  idx);
#endif

	return 0;
}
/* XXXKYMA: Fix: guest USER/KERNEL no longer share the same ASID */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
	unsigned long flags, old_entryhi;

	if (index >= current_cpu_data.tlbsize)
		BUG();

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi(UNIQUE_ENTRYHI(index));
	mtc0_tlbw_hazard();

	write_c0_index(index);
	mtc0_tlbw_hazard();

	write_c0_entrylo0(0);
	mtc0_tlbw_hazard();

	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	return 0;
}
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
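/*
 * Worked example of the ASID arithmetic above, assuming an 8-bit hardware
 * ASID (ASID_MASK == 0xff, ASID_INC == 1, ASID_FIRST_VERSION == 0x100):
 * values 0x101..0x1ff all belong to version 0x100; bumping 0x1ff gives
 * 0x200, whose low byte is zero, so the TLB is flushed and a new version
 * cycle begins.
 */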
void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int cpu = smp_processor_id();

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlbw_use_hazard();

		vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
		vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;
	int cpu = smp_processor_id();

	local_irq_save(flags);

	old_ctx = read_c0_entryhi();

	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
		mtc0_tlbw_hazard();
		write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
		write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);

		write_c0_index(entry);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
{
	int cpu, entry;

	for_each_possible_cpu(cpu) {
		for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
			vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
			    UNIQUE_ENTRYHI(entry);
			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
			vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
			    read_c0_pagemask();
#ifdef DEBUG
			kvm_debug("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
				  cpu, entry,
				  vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
				  vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
				  vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
#endif
		}
	}
}
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

#ifdef DEBUG
	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
#endif

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if (((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_info("[%d]: cpu_context: %#lx\n", cpu,
			 cpu_context(cpu, current->mm));
		kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			 cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			 vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
			 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
	}

	/* Only reload shadow host TLB if new ASIDs haven't been allocated */
#if 0
	if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
		kvm_mips_flush_host_tlb(0);
		kvm_shadow_tlb_load(vcpu);
	}
#endif

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context?  If so then the pre-empted ASID is
		 * no longer valid; we need to set it to what it should be,
		 * based on the mode of the Guest (Kernel/User)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	local_irq_restore(flags);
}
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

#if 0
	if ((atomic_read(&kvm_mips_instance) > 1)) {
		kvm_shadow_tlb_put(vcpu);
	}
#endif

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			index = kvm_mips_guest_tlb_lookup(vcpu,
					((unsigned long) opc & VPN2_MASK) |
					(kvm_read_c0_guest_entryhi(cop0) &
					 ASID_MASK));
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.
							     guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							      (unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_shadow_tlb_put);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_shadow_tlb_load);
EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);