/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#define PRIx64 "llx"
atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);
/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);
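/*
 * A minimal sketch of how the core KVM/MIPS module is expected to wire up
 * these hooks at load time (illustrative only; the actual call sites live
 * outside this file):
 *
 *	kvm_mips_gfn_to_pfn = gfn_to_pfn;
 *	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
 *	kvm_mips_is_error_pfn = is_error_pfn;
 */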
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.commpage_tlb;
}
/*
 * Structure defining a TLB entry data set.
 */
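/*
 * kvm_mips_dump_host_tlbs() - Dump every entry of the host TLB to the kernel
 * log along with the current ASID. Reads EntryHi/EntryLo0/EntryLo1/PageMask
 * for each index and restores the original EntryHi and PageMask before
 * returning. Runs with interrupts disabled for the duration.
 */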
void kvm_mips_dump_host_tlbs(void)
{
        unsigned long old_entryhi;
        unsigned long old_pagemask;
        struct kvm_mips_tlb tlb;
        unsigned long flags;
        int i;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        printk("HOST TLBs:\n");
        printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

        for (i = 0; i < current_cpu_data.tlbsize; i++) {
                write_c0_index(i);
                mtc0_tlbw_hazard();

                tlb_read();
                tlbw_use_hazard();

                tlb.tlb_hi = read_c0_entryhi();
                tlb.tlb_lo0 = read_c0_entrylo0();
                tlb.tlb_lo1 = read_c0_entrylo1();
                tlb.tlb_mask = read_c0_pagemask();

                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);
}
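/*
 * kvm_mips_dump_guest_tlbs() - Dump the software-maintained guest TLB
 * (vcpu->arch.guest_tlb) in the same format as the host dump above.
 */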
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb tlb;
        int i;

        printk("Guest TLBs:\n");
        printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.guest_tlb[i];
                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
}
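/*
 * kvm_mips_map_page() - Make sure guest frame @gfn has a backing host page.
 * Looks the gfn up through the kvm_mips_gfn_to_pfn hook under SRCU and
 * caches the result in kvm->arch.guest_pmap[]. Returns 0 on success or a
 * negative error if no pfn could be obtained.
 */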
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
        int srcu_idx, err = 0;
        pfn_t pfn;

        if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
                return 0;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

        if (kvm_mips_is_error_pfn(pfn)) {
                kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
                err = -EFAULT;
                goto out;
        }

        kvm->arch.guest_pmap[gfn] = pfn;
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
}
/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
                                                    unsigned long gva)
{
        gfn_t gfn;
        uint32_t offset = gva & ~PAGE_MASK;
        struct kvm *kvm = vcpu->kvm;

        if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
                kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
                        __builtin_return_address(0), gva);
                return KVM_INVALID_PAGE;
        }

        gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
                        gva);
                return KVM_INVALID_PAGE;
        }

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return KVM_INVALID_ADDR;

        return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
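/*
 * kvm_mips_host_tlb_write() - Install one (EntryHi, EntryLo0, EntryLo1)
 * triple into the host TLB. An existing entry matching EntryHi is
 * overwritten in place; otherwise a random index is used. If
 * @flush_dcache_mask is non-zero, the D-cache is flushed for each valid
 * half of the pair. The caller's EntryHi (ASID) is restored on exit.
 */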
int
kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
                        unsigned long entrylo0, unsigned long entrylo1,
                        int flush_dcache_mask)
{
        unsigned long flags;
        unsigned long old_entryhi;
        volatile int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        write_c0_entryhi(entryhi);
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx > current_cpu_data.tlbsize) {
                kvm_err("%s: Invalid Index: %d\n", __func__, idx);
                kvm_mips_dump_host_tlbs();
                local_irq_restore(flags);
                return -1;
        }

        if (idx < 0) {
                idx = read_c0_random() % current_cpu_data.tlbsize;
                write_c0_index(idx);
                mtc0_tlbw_hazard();
        }

        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        tlbw_use_hazard();

        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
                  "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, idx, read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());

        /* Flush D-cache */
        if (flush_dcache_mask) {
                if (entrylo0 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page((entryhi & VPN2_MASK) &
                                              ~flush_dcache_mask);
                }
                if (entrylo1 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page(((entryhi & VPN2_MASK) &
                                               ~flush_dcache_mask) |
                                              (0x1 << PAGE_SHIFT));
                }
        }

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);
        return 0;
}
/* XXXKYMA: Must be called with interrupts disabled */
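/*
 * kvm_mips_handle_kseg0_tlb_fault() - Handle a host TLB miss on a guest
 * KSEG0 address. Maps the faulting gfn and its buddy (gfn ^ 1) so the
 * even/odd pair can be installed as one EntryLo0/EntryLo1 pair under the
 * guest kernel ASID.
 */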
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
                                    struct kvm_vcpu *vcpu)
{
        gfn_t gfn;
        pfn_t pfn0, pfn1;
        unsigned long vaddr = 0;
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        int even;
        struct kvm *kvm = vcpu->kvm;
        const int flush_dcache_mask = 0;

        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
                        gfn, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        even = !(gfn & 0x1);
        vaddr = badvaddr & (PAGE_MASK << 1);

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return -1;

        if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
                return -1;

        if (even) {
                pfn0 = kvm->arch.guest_pmap[gfn];
                pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
        } else {
                pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
                pfn1 = kvm->arch.guest_pmap[gfn];
        }

        entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                        (1 << 2) | (0x1 << 1);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                        (1 << 2) | (0x1 << 1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       flush_dcache_mask);
}
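/*
 * kvm_mips_handle_commpage_tlb_fault() - Wire the guest commpage into the
 * host TLB at the reserved index returned by kvm_mips_get_commpage_asid(),
 * mapping kseg0_commpage with the guest kernel ASID. The odd half of the
 * pair is left invalid.
 */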
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
                                       struct kvm_vcpu *vcpu)
{
        pfn_t pfn0;
        unsigned long flags, old_entryhi = 0, vaddr = 0;
        unsigned long entrylo0 = 0, entrylo1 = 0;

        pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                        (1 << 2) | (0x1 << 1);

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        vaddr = badvaddr & (PAGE_MASK << 1);
        write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
        mtc0_tlbw_hazard();
        write_c0_entrylo0(entrylo0);
        mtc0_tlbw_hazard();
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        write_c0_index(kvm_mips_get_commpage_asid(vcpu));
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);

        return 0;
}
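/*
 * kvm_mips_handle_mapped_seg_tlb_fault() - Fault in a guest TLB entry for a
 * mapped segment: back both halves of the guest entry with host pages,
 * optionally report the resulting host physical addresses via @hpa0/@hpa1,
 * and write a host TLB entry that inherits the guest's D and V bits.
 */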
int
kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                     struct kvm_mips_tlb *tlb,
                                     unsigned long *hpa0, unsigned long *hpa1)
{
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        pfn_t pfn0, pfn1;

        if ((tlb->tlb_hi & VPN2_MASK) == 0) {
                pfn0 = 0;
                pfn1 = 0;
        } else {
                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
                                      >> PAGE_SHIFT) < 0)
                        return -1;

                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
                                      >> PAGE_SHIFT) < 0)
                        return -1;

                pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
                                            >> PAGE_SHIFT];
                pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
                                            >> PAGE_SHIFT];
        }

        if (hpa0)
                *hpa0 = pfn0 << PAGE_SHIFT;

        if (hpa1)
                *hpa1 = pfn1 << PAGE_SHIFT;

        /* Get attributes from the Guest TLB */
        entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
                        kvm_mips_get_kernel_asid(vcpu) :
                        kvm_mips_get_user_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                        (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                        (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo0, tlb->tlb_lo1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       tlb->tlb_mask);
}
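/*
 * kvm_mips_guest_tlb_lookup() - Search the software guest TLB for an entry
 * whose VPN2 (masked by the entry's page mask) and ASID match @entryhi, or
 * which is global. Returns the matching index, or -1 if none is found.
 */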
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
        int i;
        int index = -1;
        struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
                     ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
                    (TLB_IS_GLOBAL(tlb[i]) ||
                     (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
                        index = i;
                        break;
                }
        }

        kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
                  __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);

        return index;
}
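/*
 * kvm_mips_host_tlb_lookup() - Probe the host TLB for @vaddr using the
 * guest kernel or guest user ASID depending on the current guest mode.
 * Returns the probed index (negative if not present); the previous EntryHi
 * is restored before returning.
 */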
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
        unsigned long old_entryhi, flags;
        volatile int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        if (KVM_GUEST_KERNEL_MODE(vcpu))
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_kernel_asid(vcpu));
        else
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_user_asid(vcpu));

        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

        return idx;
}
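/*
 * kvm_mips_host_tlb_inv() - Invalidate the host TLB entry (if any) that
 * maps guest user address @va, by probing with the guest user ASID and
 * overwriting the matching entry with a unique, invalid EntryHi.
 */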
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
        int idx;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx >= current_cpu_data.tlbsize)
                BUG();

        if (idx > 0) {
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();

                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();

                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        if (idx > 0)
                kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
                          (va & VPN2_MASK) |
                          (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK),
                          idx);

        return 0;
}
/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
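/*
 * kvm_mips_host_tlb_inv_index() - Invalidate the host TLB entry at @index
 * by rewriting it with a unique EntryHi and zeroed EntryLo registers.
 */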
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
        unsigned long flags, old_entryhi;

        if (index >= current_cpu_data.tlbsize)
                BUG();

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi(UNIQUE_ENTRYHI(index));
        mtc0_tlbw_hazard();

        write_c0_index(index);
        mtc0_tlbw_hazard();

        write_c0_entrylo0(0);
        mtc0_tlbw_hazard();

        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        return 0;
}
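/*
 * kvm_mips_flush_host_tlb() - Invalidate every host TLB entry. If
 * @skip_kseg0 is set, entries whose EntryHi falls in the guest KSEG0
 * segment (guest kernel mappings) are left alone.
 */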
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
        unsigned long flags;
        unsigned long old_entryhi, entryhi;
        unsigned long old_pagemask;
        int entry = 0;
        int maxentry = current_cpu_data.tlbsize;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        /* Blast 'em all away. */
        for (entry = 0; entry < maxentry; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();

                if (skip_kseg0) {
                        tlb_read();
                        tlbw_use_hazard();

                        entryhi = read_c0_entryhi();

                        /* Don't blow away guest kernel entries */
                        if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
                                continue;
                }

                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                mtc0_tlbw_hazard();
                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();
                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        tlbw_use_hazard();

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);
}
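/*
 * kvm_get_new_mmu_context() - Allocate a fresh ASID for @mm on @cpu,
 * flushing the I-cache (on VTAG I-caches) and the local TLB when the ASID
 * space wraps and a new ASID generation begins.
 */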
void
kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
                        struct kvm_vcpu *vcpu)
{
        unsigned long asid = asid_cache(cpu);

        if (!((asid += ASID_INC) & ASID_MASK)) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();

                kvm_local_flush_tlb_all();      /* start new asid cycle */

                if (!asid)      /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
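/*
 * kvm_local_flush_tlb_all() - Invalidate every entry of the local CPU's TLB
 * by writing unique EntryHi values with zeroed EntryLo registers, then
 * restore the previous EntryHi.
 */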
void kvm_local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry = 0;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);
}
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long flags;
        int newasid = 0;

        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

        /* Allocate new kernel and user ASIDs if needed */

        local_irq_save(flags);

        if (((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
             ASID_VERSION_MASK)) {
                kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                vcpu->arch.guest_kernel_asid[cpu] =
                    vcpu->arch.guest_kernel_mm.context.asid[cpu];
                kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
                vcpu->arch.guest_user_asid[cpu] =
                    vcpu->arch.guest_user_mm.context.asid[cpu];
                newasid++;

                kvm_info("[%d]: cpu_context: %#lx\n", cpu,
                         cpu_context(cpu, current->mm));
                kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
                         cpu, vcpu->arch.guest_kernel_asid[cpu]);
                kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
                         vcpu->arch.guest_user_asid[cpu]);
        }

        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
                         vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
        }

        if (!newasid) {
                /*
                 * If we preempted while the guest was executing, then reload
                 * the pre-empted ASID
                 */
                if (current->flags & PF_VCPU) {
                        write_c0_entryhi(vcpu->arch.preempt_entryhi &
                                         ASID_MASK);
                        ehb();
                }
        } else {
                /* New ASIDs were allocated for the VM */

                /*
                 * Were we in guest context? If so then the pre-empted ASID is
                 * no longer valid, we need to set it to what it should be
                 * based on the mode of the Guest (Kernel/User)
                 */
                if (current->flags & PF_VCPU) {
                        if (KVM_GUEST_KERNEL_MODE(vcpu))
                                write_c0_entryhi(vcpu->arch.
                                                 guest_kernel_asid[cpu] &
                                                 ASID_MASK);
                        else
                                write_c0_entryhi(vcpu->arch.
                                                 guest_user_asid[cpu] &
                                                 ASID_MASK);
                        ehb();
                }
        }

        local_irq_restore(flags);
}
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        uint32_t cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();

        vcpu->arch.preempt_entryhi = read_c0_entryhi();
        vcpu->arch.last_sched_cpu = cpu;

        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             ASID_VERSION_MASK)) {
                kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
                          cpu_context(cpu, current->mm));
                drop_mmu_context(current->mm, cpu);
        }
        write_c0_entryhi(cpu_asid(cpu, current->mm));
        ehb();

        local_irq_restore(flags);
}
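/*
 * kvm_get_inst() - Fetch the guest instruction at @opc. KSEG0 addresses are
 * translated directly to a host physical address; mapped addresses are
 * resolved through the host TLB, faulting the guest TLB entry in via
 * kvm_mips_handle_mapped_seg_tlb_fault() if necessary. Returns
 * KVM_INVALID_INST if the address cannot be resolved.
 */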
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long paddr, flags;
        uint32_t inst;
        int index;

        if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
            KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
                local_irq_save(flags);
                index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
                if (index >= 0) {
                        inst = *(opc);
                } else {
                        index = kvm_mips_guest_tlb_lookup(vcpu,
                                        ((unsigned long) opc & VPN2_MASK) |
                                        (kvm_read_c0_guest_entryhi(cop0) &
                                         ASID_MASK));
                        if (index < 0) {
                                kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
                                        __func__, opc, vcpu, read_c0_entryhi());
                                kvm_mips_dump_host_tlbs();
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
                        kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
                                        &vcpu->arch.guest_tlb[index],
                                        NULL, NULL);
                        inst = *(opc);
                }
                local_irq_restore(flags);
        } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
                paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
                                        (unsigned long) opc);
                inst = *(uint32_t *) CKSEG0ADDR(paddr);
        } else {
                kvm_err("%s: illegal address: %p\n", __func__, opc);
                return KVM_INVALID_INST;
        }

        return inst;
}
EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);