/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#define PRIx64 "llx"
atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);
/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);
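
/*
 * ASID helpers: KVM/MIPS runs guest kernel and guest user code under
 * separate host ASIDs, allocated per host CPU in kvm_arch_vcpu_load()
 * below. These return the ASID to use on the current CPU.
 */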
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}
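
/*
 * Note: despite the name, the "commpage asid" below is really the wired
 * host TLB index reserved for the commpage (kvm->arch.commpage_tlb); it
 * is written to CP0_Index in kvm_mips_handle_commpage_tlb_fault().
 */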
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}
/* Structure defining a TLB entry data set. */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		/* Read the i'th host TLB entry back into the CP0 registers */
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
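
/*
 * Lazily populate the guest-physical to host-physical map: look up the
 * host page backing @gfn through the kvm_mips_gfn_to_pfn hook and cache
 * the result in kvm->arch.guest_pmap. Calls for an already-mapped gfn
 * return immediately.
 */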
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}
/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
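
/*
 * Write a host TLB entry for the even/odd page pair described by
 * @entryhi/@entrylo0/@entrylo1. The entry is probed first: on a hit the
 * matching slot is overwritten (tlbwi), on a miss a random slot is used
 * (tlbwr). A typical caller builds the register values itself, roughly
 * as kvm_mips_handle_kseg0_tlb_fault() below does:
 *
 *	entryhi = vaddr | kvm_mips_get_kernel_asid(vcpu);
 *	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
 *		   (0x3 << 3) | (1 << 2) | (0x1 << 1);
 *	kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, 0);
 *
 * where (0x3 << 3) is the cache coherency attribute (cacheable),
 * (1 << 2) the dirty/write-enable bit and (0x1 << 1) the valid bit.
 */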
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	/* Probe for an existing entry matching entryhi */
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx > current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
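
/*
 * A MIPS TLB entry maps an even/odd pair of virtual pages (EntryLo0 and
 * EntryLo1), so the KSEG0 fault handler below maps both the faulting
 * gfn and its parity sibling (gfn ^ 0x1), ordering pfn0/pfn1 by the
 * parity of the faulting page.
 */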
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       flush_dcache_mask);
}
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
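
/*
 * Map the vcpu's kseg0 commpage (a page shared between KVM and the
 * guest) into the wired host TLB slot reserved for it. Only the even
 * page (EntryLo0) is made valid; EntryLo1 is written as 0.
 */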
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
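
/*
 * Fault in a host TLB entry for an access through a guest TLB-mapped
 * segment: the pair of guest physical pages named by the matching guest
 * TLB entry @tlb are mapped via guest_pmap, and a host entry is written
 * combining the host pfns with the V and D attributes of the guest
 * entry. @hpa0/@hpa1, when non-NULL, receive the resulting host
 * physical addresses.
 */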
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	pfn_t pfn0, pfn1;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					   >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					   >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       tlb->tlb_mask);
}
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
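
/*
 * Search the software guest TLB for an entry whose VPN2 and ASID match
 * @entryhi. Returns the matching index, or -1 if nothing matches.
 */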
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int index = -1;
	int i;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference tlb[] on a hit; on a miss i is out of range */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index, tlb[index].tlb_lo0,
			  tlb[index].tlb_lo1);

	return index;
}
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
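
/*
 * Probe the hardware TLB for @vaddr under the vcpu's current guest
 * kernel or user ASID. Returns the CP0_Index result: the matching slot,
 * or a negative value if the probe missed.
 */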
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	if (idx > 0) {
		/* Overwrite the matching entry with a unique, invalid one */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx > 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu),
			  idx);

	return 0;
}
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
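
/*
 * Host entries are invalidated by rewriting them with EntryLo0/1 = 0
 * and a UNIQUE_ENTRYHI() value, an unmapped VPN2 derived from the index
 * so that no two invalidated slots can ever conflict.
 */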
/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
	unsigned long flags, old_entryhi;

	if (index >= current_cpu_data.tlbsize)
		BUG();

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi(UNIQUE_ENTRYHI(index));
	mtc0_tlbw_hazard();

	write_c0_index(index);
	mtc0_tlbw_hazard();

	write_c0_entrylo0(0);
	mtc0_tlbw_hazard();

	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	return 0;
}
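
/*
 * Invalidate host TLB entries. With @skip_kseg0 set, each entry is read
 * back first and entries whose EntryHi falls in guest KSEG0 are left
 * intact, so guest kernel mappings survive the flush.
 */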
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
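
/*
 * Allocate a new ASID for @mm on @cpu, mirroring the host's
 * get_new_mmu_context(): the per-CPU ASID cache is incremented, and
 * when the low ASID bits wrap to zero a new version cycle starts, which
 * requires a full TLB flush (and an icache flush on VTAG icaches). As a
 * sketch, with an 8-bit ASID field, an asid_cache of 0x1ff increments
 * to 0x200: the ASID_MASK bits are zero, so the TLB is flushed and
 * ASIDs are handed out again under the new version number.
 */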
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += ASID_INC;
	if (!(asid & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_local_flush_tlb_all);
/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}
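
/*
 * An ASID is considered stale when its version bits (ASID_VERSION_MASK)
 * no longer match the CPU's asid_cache; kvm_arch_vcpu_load() below
 * allocates fresh guest kernel and user ASIDs in that case before the
 * guest is resumed.
 */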
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if (((vcpu->arch.
	      guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.
					 preempt_entryhi & ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so then the pre-empted ASID is
		 * no longer valid, we need to set it to what it should be based
		 * on the mode of the Guest (Kernel/User)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_arch_vcpu_load);
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_arch_vcpu_put);
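
/*
 * Fetch the guest instruction at @opc for emulation. Three cases:
 * TLB-mapped guest addresses (below KSEG0, or in KSEG2/3) are read
 * through the host TLB, faulting the mapping in from the guest TLB if
 * the host probe misses; guest KSEG0 addresses are translated to a host
 * physical address and read through CKSEG0; anything else is rejected
 * with KVM_INVALID_INST.
 */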
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.
							     guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr =
		    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							  (unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL(kvm_get_inst);