/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

/* Pull in r4kcache.h with CONFIG_MIPS_MT temporarily undefined */
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

/* The kernel headers do not provide inttypes.h, so define the 64-bit print format here */
#define PRIx64 "llx"

/* Use VZ EntryHi.EHINV to invalidate TLB entries */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
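
/*
 * Illustrative note (editor's sketch, not from the original source):
 * UNIQUE_ENTRYHI() manufactures a dummy EntryHi per TLB index so retired
 * entries never match a real VPN2 and never collide with each other.
 * Assuming PAGE_SHIFT == 12 (4 KiB pages), consecutive indices are spaced
 * one even/odd page pair (8 KiB) apart inside unmapped KSEG0:
 *
 *      UNIQUE_ENTRYHI(0) == CKSEG0 + (0 << 13)
 *      UNIQUE_ENTRYHI(1) == CKSEG0 + (1 << 13)
 *
 * Writing such a value with write_c0_entryhi() before tlb_write_indexed()
 * is how the flush/invalidate helpers below retire an entry.
 */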
atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);
/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);
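
/*
 * Sketch of how these hooks are expected to be wired up (an assumption for
 * illustration, not a quote from the main KVM module): the core module
 * assigns generic KVM helpers before any TLB fault handling runs, e.g.
 *
 *      kvm_mips_gfn_to_pfn = gfn_to_pfn;
 *      kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
 *      kvm_mips_is_error_pfn = is_error_pfn;
 *
 * The indirection keeps this file, which must run from KSEG0, free of direct
 * link-time dependencies on the rest of KVM.
 */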
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.commpage_tlb;
}
/*
 * Structure defining a TLB entry data set.
 */
void kvm_mips_dump_host_tlbs(void)
{
        unsigned long old_entryhi;
        unsigned long old_pagemask;
        struct kvm_mips_tlb tlb;
        unsigned long flags;
        int i;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        printk("HOST TLBs:\n");
        printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

        for (i = 0; i < current_cpu_data.tlbsize; i++) {
                /* Read entry i back into the CP0 registers */
                write_c0_index(i);
                mtc0_tlbw_hazard();
                tlb_read();
                tlbw_use_hazard();

                tlb.tlb_hi = read_c0_entryhi();
                tlb.tlb_lo0 = read_c0_entrylo0();
                tlb.tlb_lo1 = read_c0_entrylo1();
                tlb.tlb_mask = read_c0_pagemask();

                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        tlbw_use_hazard();

        local_irq_restore(flags);
}
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb tlb;
        int i;

        printk("Guest TLBs:\n");
        printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.guest_tlb[i];
                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
}
void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
{
        volatile struct kvm_mips_tlb tlb;
        int i;

        printk("Shadow TLBs:\n");
        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
}
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
        int srcu_idx, err = 0;
        pfn_t pfn;

        if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
                return 0;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

        if (kvm_mips_is_error_pfn(pfn)) {
                kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
                err = -EFAULT;
                goto out;
        }

        kvm->arch.guest_pmap[gfn] = pfn;
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
}
/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
                                                    unsigned long gva)
{
        gfn_t gfn;
        uint32_t offset = gva & ~PAGE_MASK;
        struct kvm *kvm = vcpu->kvm;

        if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
                kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
                        __builtin_return_address(0), gva);
                return KVM_INVALID_PAGE;
        }

        gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
                        gva);
                return KVM_INVALID_PAGE;
        }

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return KVM_INVALID_ADDR;

        return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
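
/*
 * Usage sketch (illustrative, not from the original source): callers hand in
 * a guest-KSEG0 virtual address and get back a host physical address, or
 * KVM_INVALID_PAGE / KVM_INVALID_ADDR on failure:
 *
 *      unsigned long hpa = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, gva);
 *
 *      if (hpa != KVM_INVALID_PAGE && hpa != KVM_INVALID_ADDR)
 *              inst = *(uint32_t *) CKSEG0ADDR(hpa);
 *
 * kvm_get_inst() below uses this translate-then-CKSEG0ADDR() pattern to
 * fetch guest instructions.
 */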
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int
kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
                        unsigned long entrylo0, unsigned long entrylo1,
                        int flush_dcache_mask)
{
        unsigned long flags;
        unsigned long old_entryhi;
        int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        write_c0_entryhi(entryhi);
        mtc0_tlbw_hazard();

        /* Probe for an existing entry matching entryhi */
        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx > current_cpu_data.tlbsize) {
                kvm_err("%s: Invalid Index: %d\n", __func__, idx);
                kvm_mips_dump_host_tlbs();
                local_irq_restore(flags);
                return -1;
        }

        if (idx < 0) {
                /* No match: pick a random victim entry */
                idx = read_c0_random() % current_cpu_data.tlbsize;
                write_c0_index(idx);
                mtc0_tlbw_hazard();
        }

        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        tlbw_use_hazard();

        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
                  "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, idx, read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());

        /* Flush D-cache */
        if (flush_dcache_mask) {
                if (entrylo0 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page((entryhi & VPN2_MASK) &
                                              ~flush_dcache_mask);
                }
                if (entrylo1 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page(((entryhi & VPN2_MASK) &
                                               ~flush_dcache_mask) |
                                              (0x1 << PAGE_SHIFT));
                }
        }

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);
        return 0;
}
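
/*
 * Illustrative usage (mirrors the fault handlers below, not additional API):
 * callers compose a (VPN2 | ASID) EntryHi plus an even/odd EntryLo pair and
 * let this helper probe-and-write the host TLB:
 *
 *      entryhi  = (vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu);
 *      entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
 *                 (1 << 2) | (0x1 << 1);
 *      entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
 *                 (1 << 2) | (0x1 << 1);
 *      kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, 0);
 *
 * A zero flush_dcache_mask skips the D-cache flush, as noted above.
 */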
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
                                    struct kvm_vcpu *vcpu)
{
        gfn_t gfn;
        pfn_t pfn0, pfn1;
        unsigned long vaddr = 0;
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        int even;
        struct kvm *kvm = vcpu->kvm;
        const int flush_dcache_mask = 0;

        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
                        gfn, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }
        even = !(gfn & 0x1);
        vaddr = badvaddr & (PAGE_MASK << 1);

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return -1;

        if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
                return -1;

        if (even) {
                pfn0 = kvm->arch.guest_pmap[gfn];
                pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
        } else {
                pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
                pfn1 = kvm->arch.guest_pmap[gfn];
        }

        entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       flush_dcache_mask);
}
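
/*
 * Worked example (editor's illustration): a host TLB entry always covers an
 * even/odd page pair, so the kseg0 handler maps badvaddr's gfn together with
 * its buddy (gfn ^ 0x1).  With 4 KiB pages, a fault on guest physical page 5
 * (odd) loads pages 4 and 5 into one entry: pfn0 = guest_pmap[4],
 * pfn1 = guest_pmap[5], and "badvaddr & (PAGE_MASK << 1)" aligns vaddr down
 * to the 8 KiB pair boundary that EntryHi describes.
 */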
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
                                       struct kvm_vcpu *vcpu)
{
        pfn_t pfn0;
        unsigned long flags, old_entryhi = 0, vaddr = 0;
        unsigned long entrylo0 = 0, entrylo1 = 0;

        pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        vaddr = badvaddr & (PAGE_MASK << 1);
        write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
        mtc0_tlbw_hazard();
        write_c0_entrylo0(entrylo0);
        mtc0_tlbw_hazard();
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        write_c0_index(kvm_mips_get_commpage_asid(vcpu));
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);

        return 0;
}
int
kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                     struct kvm_mips_tlb *tlb,
                                     unsigned long *hpa0, unsigned long *hpa1)
{
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        pfn_t pfn0, pfn1;

        if ((tlb->tlb_hi & VPN2_MASK) == 0) {
                pfn0 = 0;
                pfn1 = 0;
        } else {
                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
                                      >> PAGE_SHIFT) < 0)
                        return -1;

                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
                                      >> PAGE_SHIFT) < 0)
                        return -1;

                pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
                                            >> PAGE_SHIFT];
                pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
                                            >> PAGE_SHIFT];
        }

        if (hpa0)
                *hpa0 = pfn0 << PAGE_SHIFT;

        if (hpa1)
                *hpa1 = pfn1 << PAGE_SHIFT;

        /* Get attributes from the Guest TLB */
        entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
                  kvm_mips_get_kernel_asid(vcpu) :
                  kvm_mips_get_user_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo0, tlb->tlb_lo1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       tlb->tlb_mask);
}
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
        int i;
        int index = -1;
        struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
                     ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
                    (TLB_IS_GLOBAL(tlb[i]) ||
                     (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
                        index = i;
                        break;
                }
        }

        /* Only dereference the TLB array when a matching entry was found */
        kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
                  __func__, entryhi, index,
                  index >= 0 ? tlb[index].tlb_lo0 : 0ul,
                  index >= 0 ? tlb[index].tlb_lo1 : 0ul);

        return index;
}
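
/*
 * Usage sketch (illustrative): the probe key is an EntryHi-style value built
 * from the faulting VPN2 and the guest's current ASID, exactly as
 * kvm_get_inst() does further down:
 *
 *      index = kvm_mips_guest_tlb_lookup(vcpu,
 *                      (gva & VPN2_MASK) |
 *                      (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK));
 *      if (index < 0)
 *              return KVM_INVALID_INST;   // no matching guest TLB entry
 *
 * A non-negative return is an index into vcpu->arch.guest_tlb[].
 */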
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
        unsigned long old_entryhi, flags;
        int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        if (KVM_GUEST_KERNEL_MODE(vcpu))
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_kernel_asid(vcpu));
        else
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_user_asid(vcpu));

        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

        return idx;
}
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
        int idx;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx >= current_cpu_data.tlbsize)
                BUG();

        if (idx > 0) {
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();

                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();

                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        if (idx > 0)
                kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
                          (va & VPN2_MASK) |
                          (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK),
                          idx);

        return 0;
}
/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
        unsigned long flags, old_entryhi;

        if (index >= current_cpu_data.tlbsize)
                BUG();

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi(UNIQUE_ENTRYHI(index));
        mtc0_tlbw_hazard();

        write_c0_index(index);
        mtc0_tlbw_hazard();

        write_c0_entrylo0(0);
        mtc0_tlbw_hazard();

        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        return 0;
}
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
        unsigned long flags;
        unsigned long old_entryhi, entryhi;
        unsigned long old_pagemask;
        int entry = 0;
        int maxentry = current_cpu_data.tlbsize;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        /* Blast 'em all away. */
        for (entry = 0; entry < maxentry; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();

                if (skip_kseg0) {
                        tlb_read();
                        tlbw_use_hazard();

                        entryhi = read_c0_entryhi();

                        /* Don't blow away guest kernel entries */
                        if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
                                continue;
                }

                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                mtc0_tlbw_hazard();
                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();
                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        tlbw_use_hazard();

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);
}
void
kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
                        struct kvm_vcpu *vcpu)
{
        unsigned long asid = asid_cache(cpu);

        if (!((asid += ASID_INC) & ASID_MASK)) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();

                kvm_local_flush_tlb_all();      /* start new asid cycle */

                if (!asid)      /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
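
/*
 * Illustrative note (assumption about the standard MIPS ASID scheme): the
 * cached value mixes a generation ("version") in the bits above ASID_MASK
 * with the hardware ASID in the low bits.  When bumping by ASID_INC makes
 * the low bits wrap to zero, the hardware ASID space is exhausted, so the
 * code flushes the TLB and starts a new generation.  E.g. with ASID_INC == 1
 * and an 8-bit ASID field:
 *
 *      asid = 0x1ff;  asid += ASID_INC;   // -> 0x200, low 8 bits == 0
 *
 * which is why kvm_arch_vcpu_load() compares
 * (guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK to decide
 * whether a vcpu's ASIDs are stale.
 */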
void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        unsigned long old_entryhi;
        unsigned long old_pagemask;
        int entry = 0;
        int cpu = smp_processor_id();

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_read();
                tlbw_use_hazard();

                vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
                vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
                vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
                vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
        }

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);
}
void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;
        int cpu = smp_processor_id();

        local_irq_save(flags);

        old_ctx = read_c0_entryhi();

        for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
                mtc0_tlbw_hazard();
                write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
                write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);

                write_c0_index(entry);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                tlbw_use_hazard();
        }

        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        mtc0_tlbw_hazard();
        local_irq_restore(flags);
}
void kvm_local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry = 0;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);
}
void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
{
        int cpu, entry;

        for_each_possible_cpu(cpu) {
                for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
                            UNIQUE_ENTRYHI(entry);
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
                            read_c0_pagemask();

                        kvm_debug
                            ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
                             cpu, entry,
                             vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
                             vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
                             vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
                }
        }
}
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long flags;
        int newasid = 0;

        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

        /* Allocate new kernel and user ASIDs if needed */

        local_irq_save(flags);

        if (((vcpu->arch.
              guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
                kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                vcpu->arch.guest_kernel_asid[cpu] =
                    vcpu->arch.guest_kernel_mm.context.asid[cpu];
                kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
                vcpu->arch.guest_user_asid[cpu] =
                    vcpu->arch.guest_user_mm.context.asid[cpu];
                newasid++;

                kvm_info("[%d]: cpu_context: %#lx\n", cpu,
                         cpu_context(cpu, current->mm));
                kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
                         cpu, vcpu->arch.guest_kernel_asid[cpu]);
                kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
                         vcpu->arch.guest_user_asid[cpu]);
        }

        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
                         vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
        }

        /* Only reload shadow host TLB if new ASIDs haven't been allocated */
        if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
                kvm_mips_flush_host_tlb(0);
                kvm_shadow_tlb_load(vcpu);
        }

        if (!newasid) {
                /*
                 * If we preempted while the guest was executing, then reload
                 * the pre-empted ASID
                 */
                if (current->flags & PF_VCPU)
                        write_c0_entryhi(vcpu->arch.
                                         preempt_entryhi & ASID_MASK);
        } else {
                /* New ASIDs were allocated for the VM */

                /*
                 * Were we in guest context? If so then the pre-empted ASID is
                 * no longer valid, we need to set it to what it should be
                 * based on the mode of the Guest (Kernel/User)
                 */
                if (current->flags & PF_VCPU) {
                        if (KVM_GUEST_KERNEL_MODE(vcpu))
                                write_c0_entryhi(vcpu->arch.
                                                 guest_kernel_asid[cpu] &
                                                 ASID_MASK);
                        else
                                write_c0_entryhi(vcpu->arch.
                                                 guest_user_asid[cpu] &
                                                 ASID_MASK);
                }
        }

        local_irq_restore(flags);
}
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        uint32_t cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();

        vcpu->arch.preempt_entryhi = read_c0_entryhi();
        vcpu->arch.last_sched_cpu = cpu;

        if ((atomic_read(&kvm_mips_instance) > 1)) {
                kvm_shadow_tlb_put(vcpu);
        }

        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             ASID_VERSION_MASK)) {
                kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
                          cpu_context(cpu, current->mm));
                drop_mmu_context(current->mm, cpu);
        }
        write_c0_entryhi(cpu_asid(cpu, current->mm));

        local_irq_restore(flags);
}
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long paddr, flags;
        uint32_t inst;
        int index;

        if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
            KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
                local_irq_save(flags);
                index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
                if (index >= 0) {
                        inst = *(opc);
                } else {
                        index =
                            kvm_mips_guest_tlb_lookup(vcpu,
                                                      ((unsigned long) opc & VPN2_MASK)
                                                      |
                                                      (kvm_read_c0_guest_entryhi
                                                       (cop0) & ASID_MASK));
                        if (index < 0) {
                                kvm_err
                                    ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
                                     __func__, opc, vcpu, read_c0_entryhi());
                                kvm_mips_dump_host_tlbs();
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
                        kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
                                                             &vcpu->arch.
                                                             guest_tlb[index],
                                                             NULL, NULL);
                        inst = *(opc);
                }
                local_irq_restore(flags);
        } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
                paddr =
                    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
                                                          (unsigned long) opc);
                inst = *(uint32_t *) CKSEG0ADDR(paddr);
        } else {
                kvm_err("%s: illegal address: %p\n", __func__, opc);
                return KVM_INVALID_INST;
        }

        return inst;
}
EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_shadow_tlb_put);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_shadow_tlb_load);
EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);