/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
        gpa_t gpa;
        gva_t kseg = KSEGX(gva);
        gva_t gkseg = KVM_GUEST_KSEGX(gva);

        if ((kseg == CKSEG0) || (kseg == CKSEG1))
                gpa = CPHYSADDR(gva);
        else if (gkseg == KVM_GUEST_KSEG0)
                gpa = KVM_GUEST_CPHYSADDR(gva);
        else {
                kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
                kvm_mips_dump_host_tlbs();
                gpa = KVM_INVALID_ADDR;
        }

        kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

        return gpa;
}
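
/**
 * kvm_trap_emul_no_handler() - Fallback for guest exits with no handler.
 * @vcpu:	Virtual CPU context.
 *
 * Log the unhandled exception code, the faulting instruction and the vCPU
 * state, then report an internal error to userspace via
 * KVM_EXIT_INTERNAL_ERROR.
 */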
static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
{
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 inst = 0;

        /*
         * Fetch the instruction.
         */
        if (cause & CAUSEF_BD)
                opc += 1;
        kvm_get_badinstr(opc, vcpu, &inst);

        kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
                exccode, opc, inst, badvaddr,
                kvm_read_c0_guest_status(vcpu->arch.cop0));
        kvm_arch_vcpu_dump_regs(vcpu);
        vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;

        return RESUME_HOST;
}
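
/**
 * kvm_trap_emul_handle_cop_unusable() - Guest coprocessor unusable exception.
 * @vcpu:	Virtual CPU context.
 *
 * Depending on which coprocessor trapped and on the guest's FPU
 * configuration, either deliver a COP1 unusable exception to the guest,
 * restore the FPU state for the guest, or emulate the trapping instruction.
 */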
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
                /* FPU Unusable */
                if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
                    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
                        /*
                         * Unusable/no FPU in guest:
                         * deliver guest COP1 Unusable Exception
                         */
                        er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
                } else {
                        /* Restore FPU state */
                        kvm_own_fpu(vcpu);
                        er = EMULATE_DONE;
                }
        } else {
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        case EMULATE_WAIT:
                run->exit_reason = KVM_EXIT_INTR;
                ret = RESUME_HOST;
                break;

        case EMULATE_HYPERCALL:
                ret = kvm_mips_handle_hypcall(vcpu);
                break;

        default:
                BUG();
        }
        return ret;
}
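
/**
 * kvm_mips_bad_load() - Handle a load from an unmapped/MMIO guest address.
 * @cause:	CP0 Cause register value from the exception.
 * @opc:	Guest PC of the faulting instruction.
 * @run:	kvm_run structure used for the MMIO exit.
 * @vcpu:	Virtual CPU context.
 *
 * Fetch the faulting instruction and emulate it as an MMIO load, exiting to
 * userspace with KVM_EXIT_MMIO on success.
 */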
static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
                             struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        union mips_instruction inst;
        int err;

        /* A code fetch fault doesn't count as an MMIO */
        if (kvm_is_ifetch_fault(&vcpu->arch)) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Fetch the instruction. */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Emulate the load */
        er = kvm_mips_emulate_load(inst, cause, run, vcpu);
        if (er == EMULATE_FAIL) {
                kvm_err("Emulate load from MMIO space failed\n");
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        } else {
                run->exit_reason = KVM_EXIT_MMIO;
        }
        return RESUME_HOST;
}
static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
                              struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        union mips_instruction inst;
        int err;

        /* Fetch the instruction. */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Emulate the store */
        er = kvm_mips_emulate_store(inst, cause, run, vcpu);
        if (er == EMULATE_FAIL) {
                kvm_err("Emulate store to MMIO space failed\n");
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        } else {
                run->exit_reason = KVM_EXIT_MMIO;
        }
        return RESUME_HOST;
}
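
/*
 * Dispatch a bad access to the load or store variant above, depending on
 * whether the faulting access was a store.
 */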
static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
                               struct kvm_vcpu *vcpu, bool store)
{
        if (store)
                return kvm_mips_bad_store(cause, opc, run, vcpu);
        else
                return kvm_mips_bad_load(cause, opc, run, vcpu);
}
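
/**
 * kvm_trap_emul_handle_tlb_mod() - Guest TLB modified exception.
 * @vcpu:	Virtual CPU context.
 *
 * A write hit a valid but not dirty (read-only) TLB entry. Either relay the
 * exception to the guest (its own entry was clean), update the shadow host
 * TLB, or fall back to MMIO emulation for addresses that are not writable
 * guest RAM.
 */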
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        struct kvm_mips_tlb *tlb;
        unsigned long entryhi;
        int index;

        if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                /*
                 * First find the mapping in the guest TLB. If the failure to
                 * write was due to the guest TLB, it should be up to the guest
                 * to handle it.
                 */
                entryhi = (badvaddr & VPN2_MASK) |
                          (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
                index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

                /*
                 * These should never happen.
                 * They would indicate stale host TLB entries.
                 */
                if (unlikely(index < 0)) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }
                tlb = vcpu->arch.guest_tlb + index;
                if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }

                /*
                 * Guest entry not dirty? That would explain the TLB modified
                 * exception. Relay that on to the guest so it can handle it.
                 */
                if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
                        kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
                        return RESUME_GUEST;
                }

                if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
                                                         true))
                        /* Not writable, needs handling as MMIO */
                        return kvm_mips_bad_store(cause, opc, run, vcpu);
                return RESUME_GUEST;
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
                        /* Not writable, needs handling as MMIO */
                        return kvm_mips_bad_store(cause, opc, run, vcpu);
                return RESUME_GUEST;
        } else {
                /* host kernel addresses are all handled as MMIO */
                return kvm_mips_bad_store(cause, opc, run, vcpu);
        }
}
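
/**
 * kvm_trap_emul_handle_tlb_miss() - Common guest TLB load/store miss handler.
 * @vcpu:	Virtual CPU context.
 * @store:	Whether the faulting access was a store.
 *
 * Distinguish commpage, guest user segment, guest KSeg0 and host kernel
 * segment faults, and resolve each by refilling the shadow TLB, delivering
 * the fault to the guest, or emulating the access as MMIO.
 */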
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          store ? "ST" : "LD", cause, opc, badvaddr);

                /*
                 * User Address (UA) fault, this could happen if
                 * (1) TLB entry not present/valid in both Guest and shadow host
                 *     TLBs, in this case we pass on the fault to the guest
                 *     kernel and let it handle it.
                 * (2) TLB entry is present in the Guest TLB but not in the
                 *     shadow, in this case we inject the TLB from the Guest TLB
                 *     into the shadow host TLB
                 */
                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * All KSEG0 faults are handled by KVM, as the guest kernel does
                 * not expect to ever get them
                 */
                if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
                        ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
        } else if (KVM_GUEST_KERNEL_MODE(vcpu)
                   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                /*
                 * With EVA we may get a TLB exception instead of an address
                 * error when the guest performs MMIO to KSeg1 addresses.
                 */
                ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
        } else {
                kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
                        store ? "ST" : "LD", cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KERNEL_MODE(vcpu)
            && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                ret = kvm_mips_bad_store(cause, opc, run, vcpu);
        } else {
                kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        int ret = RESUME_GUEST;

        if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
                ret = kvm_mips_bad_load(cause, opc, run, vcpu);
        } else {
                kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_handle_ri(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
            (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
                /*
                 * No MSA in guest, or FPU enabled and not in FR=1 mode,
                 * guest reserved instruction exception
                 */
                er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
        } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
                /* MSA disabled by guest, guest MSA disabled exception */
                er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
        } else {
                /* Restore MSA/FPU state */
                kvm_own_msa(vcpu);
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}
static int kvm_trap_emul_hardware_enable(void)
{
        return 0;
}
static void kvm_trap_emul_hardware_disable(void)
{
}
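
/*
 * Trap & emulate mode has no hardware virtualization state to set up, so
 * the two hooks above are no-ops; only the KVM_CAP_MIPS_TE capability is
 * advertised below.
 */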
static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_MIPS_TE:
                r = 1;
                break;
        default:
                r = 0;
                break;
        }

        return r;
}
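
/*
 * VCPU init allocates separate GVA -> HPA page table roots for guest kernel
 * mode and guest user mode, since the two modes are given distinct address
 * spaces.
 */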
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

        /*
         * Allocate GVA -> HPA page tables.
         * MIPS doesn't use the mm_struct pointer argument.
         */
        kern_mm->pgd = pgd_alloc(kern_mm);
        if (!kern_mm->pgd)
                return -ENOMEM;

        user_mm->pgd = pgd_alloc(user_mm);
        if (!user_mm->pgd) {
                pgd_free(kern_mm, kern_mm->pgd);
                return -ENOMEM;
        }

        return 0;
}
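
/*
 * Walk a GVA page table allocated above and free the PTE pages below the
 * 0x80000000 boundary; entries at or above it are host kernel page tables
 * copied from init_mm.pgd and must not be freed.
 */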
static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
        /* Don't free host kernel page tables copied from init_mm.pgd */
        const unsigned long end = 0x80000000;
        unsigned long pgd_va, pud_va, pmd_va;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;

        for (i = 0; i < USER_PTRS_PER_PGD; i++) {
                if (pgd_none(pgd[i]))
                        continue;

                pgd_va = (unsigned long)i << PGDIR_SHIFT;
                if (pgd_va >= end)
                        break;
                p4d = p4d_offset(pgd, 0);
                pud = pud_offset(p4d + i, 0);
                for (j = 0; j < PTRS_PER_PUD; j++) {
                        if (pud_none(pud[j]))
                                continue;

                        pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
                        if (pud_va >= end)
                                break;
                        pmd = pmd_offset(pud + j, 0);
                        for (k = 0; k < PTRS_PER_PMD; k++) {
                                if (pmd_none(pmd[k]))
                                        continue;

                                pmd_va = pud_va | (k << PMD_SHIFT);
                                if (pmd_va >= end)
                                        break;
                                pte = pte_offset(pmd + k, 0);
                                pte_free_kernel(NULL, pte);
                        }
                }
        }
        pgd_free(NULL, pgd);
}
static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
        kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}
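
/*
 * Initial guest CP0 state: cache geometry in the Config registers is derived
 * from the host, where it must match, while bits for features that are not
 * emulated (coprocessor 2, MDMX, performance counters, watch registers,
 * MIPS16) are cleared.
 */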
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 config, config1;
        int vcpu_id = vcpu->vcpu_id;

        /* Start off the timer at 100 MHz */
        kvm_mips_init_count(vcpu, 100*1000*1000);

        /*
         * Arch specific stuff, set up config registers properly so that the
         * guest will come up as expected
         */
#ifndef CONFIG_CPU_MIPSR6
        /* r2-r5, simulate a MIPS 24kc */
        kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
        /* r6+, simulate a generic QEMU machine */
        kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
        /*
         * Have config1, Cacheable, noncoherent, write-back, write allocate.
         * Endianness, arch revision & virtually tagged icache should match
         * host.
         */
        config = read_c0_config() & MIPS_CONF_AR;
        config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
        config |= CONF_BE;
#endif
        if (cpu_has_vtag_icache)
                config |= MIPS_CONF_VI;
        kvm_write_c0_guest_config(cop0, config);

        /* Read the cache characteristics from the host Config1 Register */
        config1 = (read_c0_config1() & ~0x7f);

        /* DCache line size not correctly reported in Config1 on Octeon CPUs */
        if (cpu_dcache_line_size()) {
                config1 &= ~MIPS_CONF1_DL;
                config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
                            MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
        }

        /* Set up MMU size */
        config1 &= ~(0x3f << 25);
        config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

        /* We unset some bits that we aren't emulating */
        config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
                     MIPS_CONF1_WR | MIPS_CONF1_CA);
        kvm_write_c0_guest_config1(cop0, config1);

        /* Have config3, no tertiary/secondary caches implemented */
        kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
        /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

        /* Have config4, UserLocal */
        kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

        /* Have config5 */
        kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

        /* No config6 */
        kvm_write_c0_guest_config5(cop0, 0);

        /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
        kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

        kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);

        /*
         * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
         */
        kvm_write_c0_guest_intctl(cop0, 0xFC000000);

        /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
        kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
                                       (vcpu_id & MIPS_EBASE_CPUNUM));

        /* Put PC at guest reset vector */
        vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);

        return 0;
}
static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
        /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
        kvm_flush_remote_tlbs(kvm);
}
static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
                                               const struct kvm_memory_slot *slot)
{
        kvm_trap_emul_flush_shadow_all(kvm);
}
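
/*
 * Registers exposed to userspace through the KVM_GET_ONE_REG/KVM_SET_ONE_REG
 * ioctls in trap & emulate mode. kvm_trap_emul_copy_reg_indices() below
 * reports this list to userspace.
 */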
static u64 kvm_trap_emul_get_one_regs[] = {
        KVM_REG_MIPS_CP0_INDEX,
        KVM_REG_MIPS_CP0_ENTRYLO0,
        KVM_REG_MIPS_CP0_ENTRYLO1,
        KVM_REG_MIPS_CP0_CONTEXT,
        KVM_REG_MIPS_CP0_USERLOCAL,
        KVM_REG_MIPS_CP0_PAGEMASK,
        KVM_REG_MIPS_CP0_WIRED,
        KVM_REG_MIPS_CP0_HWRENA,
        KVM_REG_MIPS_CP0_BADVADDR,
        KVM_REG_MIPS_CP0_COUNT,
        KVM_REG_MIPS_CP0_ENTRYHI,
        KVM_REG_MIPS_CP0_COMPARE,
        KVM_REG_MIPS_CP0_STATUS,
        KVM_REG_MIPS_CP0_INTCTL,
        KVM_REG_MIPS_CP0_CAUSE,
        KVM_REG_MIPS_CP0_EPC,
        KVM_REG_MIPS_CP0_PRID,
        KVM_REG_MIPS_CP0_EBASE,
        KVM_REG_MIPS_CP0_CONFIG,
        KVM_REG_MIPS_CP0_CONFIG1,
        KVM_REG_MIPS_CP0_CONFIG2,
        KVM_REG_MIPS_CP0_CONFIG3,
        KVM_REG_MIPS_CP0_CONFIG4,
        KVM_REG_MIPS_CP0_CONFIG5,
        KVM_REG_MIPS_CP0_CONFIG7,
        KVM_REG_MIPS_CP0_ERROREPC,
        KVM_REG_MIPS_CP0_KSCRATCH1,
        KVM_REG_MIPS_CP0_KSCRATCH2,
        KVM_REG_MIPS_CP0_KSCRATCH3,
        KVM_REG_MIPS_CP0_KSCRATCH4,
        KVM_REG_MIPS_CP0_KSCRATCH5,
        KVM_REG_MIPS_CP0_KSCRATCH6,

        KVM_REG_MIPS_COUNT_CTL,
        KVM_REG_MIPS_COUNT_RESUME,
        KVM_REG_MIPS_COUNT_HZ,
};
static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}
static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
                                          u64 __user *indices)
{
        if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
                         sizeof(kvm_trap_emul_get_one_regs)))
                return -EFAULT;
        indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

        return 0;
}
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 *v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_INDEX:
                *v = (long)kvm_read_c0_guest_index(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO0:
                *v = kvm_read_c0_guest_entrylo0(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO1:
                *v = kvm_read_c0_guest_entrylo1(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                *v = (long)kvm_read_c0_guest_context(cop0);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                *v = (long)kvm_read_c0_guest_userlocal(cop0);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                *v = (long)kvm_read_c0_guest_pagemask(cop0);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                *v = (long)kvm_read_c0_guest_wired(cop0);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                *v = (long)kvm_read_c0_guest_hwrena(cop0);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                *v = (long)kvm_read_c0_guest_badvaddr(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                *v = (long)kvm_read_c0_guest_entryhi(cop0);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                *v = (long)kvm_read_c0_guest_compare(cop0);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                *v = (long)kvm_read_c0_guest_status(cop0);
                break;
        case KVM_REG_MIPS_CP0_INTCTL:
                *v = (long)kvm_read_c0_guest_intctl(cop0);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                *v = (long)kvm_read_c0_guest_cause(cop0);
                break;
        case KVM_REG_MIPS_CP0_EPC:
                *v = (long)kvm_read_c0_guest_epc(cop0);
                break;
        case KVM_REG_MIPS_CP0_PRID:
                *v = (long)kvm_read_c0_guest_prid(cop0);
                break;
        case KVM_REG_MIPS_CP0_EBASE:
                *v = (long)kvm_read_c0_guest_ebase(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                *v = (long)kvm_read_c0_guest_config(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                *v = (long)kvm_read_c0_guest_config1(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                *v = (long)kvm_read_c0_guest_config2(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                *v = (long)kvm_read_c0_guest_config3(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                *v = (long)kvm_read_c0_guest_config4(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                *v = (long)kvm_read_c0_guest_config5(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG7:
                *v = (long)kvm_read_c0_guest_config7(cop0);
                break;
        case KVM_REG_MIPS_CP0_COUNT:
                *v = kvm_mips_read_count(vcpu);
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                *v = vcpu->arch.count_ctl;
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                *v = ktime_to_ns(vcpu->arch.count_resume);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                *v = vcpu->arch.count_hz;
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                *v = (long)kvm_read_c0_guest_errorepc(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH1:
                *v = (long)kvm_read_c0_guest_kscratch1(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH2:
                *v = (long)kvm_read_c0_guest_kscratch2(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH3:
                *v = (long)kvm_read_c0_guest_kscratch3(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH4:
                *v = (long)kvm_read_c0_guest_kscratch4(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH5:
                *v = (long)kvm_read_c0_guest_kscratch5(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH6:
                *v = (long)kvm_read_c0_guest_kscratch6(cop0);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
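
/*
 * Writes to some registers need special treatment: the Count/Compare/Cause
 * timer state must stay consistent, EBase is restricted to guest KSeg0, and
 * the Config registers only accept changes permitted by their writability
 * masks.
 */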
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int ret = 0;
        unsigned int cur, change;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_INDEX:
                kvm_write_c0_guest_index(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO0:
                kvm_write_c0_guest_entrylo0(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO1:
                kvm_write_c0_guest_entrylo1(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                kvm_write_c0_guest_context(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                kvm_write_c0_guest_userlocal(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                kvm_write_c0_guest_pagemask(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                kvm_write_c0_guest_wired(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                kvm_write_c0_guest_hwrena(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                kvm_write_c0_guest_badvaddr(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                kvm_write_c0_guest_entryhi(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                kvm_write_c0_guest_status(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_INTCTL:
                /* No VInt, so no VS, read-only for now */
                break;
        case KVM_REG_MIPS_CP0_EPC:
                kvm_write_c0_guest_epc(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_PRID:
                kvm_write_c0_guest_prid(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_EBASE:
                /*
                 * Allow core number to be written, but the exception base must
                 * remain in guest KSeg0.
                 */
                kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
                                          v);
                break;
        case KVM_REG_MIPS_CP0_COUNT:
                kvm_mips_write_count(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                kvm_mips_write_compare(vcpu, v, false);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                /*
                 * If the timer is stopped or started (DC bit) it must look
                 * atomic with changes to the interrupt pending bits (TI, IRQ5).
                 * A timer interrupt should not happen in between.
                 */
                if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
                        if (v & CAUSEF_DC) {
                                /* disable timer first */
                                kvm_mips_count_disable_cause(vcpu);
                                kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
                                                          v);
                        } else {
                                /* enable timer last */
                                kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
                                                          v);
                                kvm_mips_count_enable_cause(vcpu);
                        }
                } else {
                        kvm_write_c0_guest_cause(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                cur = kvm_read_c0_guest_config1(cop0);
                change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config1(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                cur = kvm_read_c0_guest_config3(cop0);
                change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config3(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                cur = kvm_read_c0_guest_config4(cop0);
                change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config4(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                cur = kvm_read_c0_guest_config5(cop0);
                change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config5(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG7:
                /* writes ignored */
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                ret = kvm_mips_set_count_ctl(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                ret = kvm_mips_set_count_resume(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                ret = kvm_mips_set_count_hz(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                kvm_write_c0_guest_errorepc(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH1:
                kvm_write_c0_guest_kscratch1(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH2:
                kvm_write_c0_guest_kscratch2(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH3:
                kvm_write_c0_guest_kscratch3(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH4:
                kvm_write_c0_guest_kscratch4(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH5:
                kvm_write_c0_guest_kscratch5(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH6:
                kvm_write_c0_guest_kscratch6(cop0, v);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
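
/*
 * When the vcpu_load/vcpu_put callbacks below run with PF_VCPU set, the task
 * was preempted while in guest context, so the guest GVA address space (not
 * current->mm) must be switched in or out.
 */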
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;

        /*
         * Were we in guest context? If so, restore the appropriate ASID based
         * on the mode of the Guest (Kernel/User).
         */
        if (current->flags & PF_VCPU) {
                mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
                check_switch_mmu_context(mm);
                kvm_mips_suspend_mm(cpu);
                ehb();
        }

        return 0;
}
static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_lose_fpu(vcpu);

        if (current->flags & PF_VCPU) {
                /* Restore normal Linux process memory map */
                check_switch_mmu_context(current->mm);
                kvm_mips_resume_mm(cpu);
                ehb();
        }

        return 0;
}
static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
                                         bool reload_asid)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;
        int i;

        if (likely(!kvm_request_pending(vcpu)))
                return;

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
                /*
                 * Both kernel & user GVA mappings must be invalidated. The
                 * caller is just about to check whether the ASID is stale
                 * anyway so no need to reload it here.
                 */
                kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
                kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
                for_each_possible_cpu(i) {
                        set_cpu_context(i, kern_mm, 0);
                        set_cpu_context(i, user_mm, 0);
                }

                /* Generate new ASID for current mode */
                if (reload_asid) {
                        mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
                        get_new_mmu_context(mm);
                        write_c0_entryhi(cpu_asid(cpu, mm));
                        TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
                }
        }
}
/**
 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Call before a GVA space access outside of guest mode, to ensure that
 * asynchronous TLB flush requests are handled or delayed until completion of
 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
 *
 * Should be called with IRQs already enabled.
 */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
{
        /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
        WARN_ON_ONCE(irqs_disabled());

        /*
         * The caller is about to access the GVA space, so we set the mode to
         * force TLB flush requests to send an IPI, and also disable IRQs to
         * delay IPI handling until kvm_trap_emul_gva_lockless_end().
         */
        local_irq_disable();

        /*
         * Make sure the read of VCPU requests is not reordered ahead of the
         * write to vcpu->mode, or we could miss a TLB flush request while
         * the requester sees the VCPU as outside of guest mode and not needing
         * a flush IPI.
         */
        smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);

        /*
         * If a TLB flush has been requested (potentially while
         * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
         * before accessing the GVA space, and be sure to reload the ASID if
         * necessary as it'll be immediately used.
         *
         * TLB flush requests after this check will trigger an IPI due to the
         * mode change above, which will be delayed due to IRQs disabled.
         */
        kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
}
/**
 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Called after a GVA space access outside of guest mode. Should have a
 * matching call to kvm_trap_emul_gva_lockless_begin().
 */
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
{
        /*
         * Make sure the write to vcpu->mode is not reordered in front of GVA
         * accesses, or a TLB flush requester may not think it necessary to
         * send an IPI.
         */
        smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);

        /*
         * Now that the access to GVA space is complete, it's safe for pending
         * TLB flush request IPIs to be handled (which indicates completion).
         */
        local_irq_enable();
}
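
/*
 * Called with IRQs disabled just before entering the guest, to service
 * pending requests and make sure the ASID for the current guest mode is
 * still valid.
 */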
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int i, cpu = smp_processor_id();
        unsigned int gasid;

        /*
         * No need to reload ASID, IRQs are disabled already so there's no rush,
         * and we'll check if we need to regenerate below anyway before
         * re-entering the guest.
         */
        kvm_trap_emul_check_requests(vcpu, cpu, false);

        if (KVM_GUEST_KERNEL_MODE(vcpu)) {
                mm = kern_mm;
        } else {
                mm = user_mm;

                /*
                 * Lazy host ASID regeneration / PT flush for guest user mode.
                 * If the guest ASID has changed since the last guest usermode
                 * execution, invalidate the stale TLB entries and flush GVA PT
                 * entries too.
                 */
                gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
                if (gasid != vcpu->arch.last_user_gasid) {
                        kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
                        for_each_possible_cpu(i)
                                set_cpu_context(i, user_mm, 0);
                        vcpu->arch.last_user_gasid = gasid;
                }
        }

        /*
         * Check if ASID is stale. This may happen due to a TLB flush request or
         * a lazy user MM invalidation.
         */
        check_mmu_context(mm);
}
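
/*
 * Main guest run loop for trap & emulate: pending interrupts are delivered,
 * page faulting and hardware page table walking are suspended, and the guest
 * address space is switched in around the low-level vcpu_run() entry.
 */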
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        int cpu = smp_processor_id();
        int r;

        /* Check if we have any exceptions/interrupts pending */
        kvm_mips_deliver_interrupts(vcpu,
                                    kvm_read_c0_guest_cause(vcpu->arch.cop0));

        kvm_trap_emul_vcpu_reenter(run, vcpu);

        /*
         * We use user accessors to access guest memory, but we don't want to
         * invoke Linux page faulting.
         */
        pagefault_disable();

        /* Disable hardware page table walking while in guest */
        htw_stop();

        /*
         * While in guest context we're in the guest's address space, not the
         * host process address space, so we need to be careful not to confuse
         * e.g. cache management IPIs.
         */
        kvm_mips_suspend_mm(cpu);

        r = vcpu->arch.vcpu_run(run, vcpu);

        /* We may have migrated while handling guest exits */
        cpu = smp_processor_id();

        /* Restore normal Linux process memory map */
        check_switch_mmu_context(current->mm);
        kvm_mips_resume_mm(cpu);

        htw_start();

        pagefault_enable();

        return r;
}
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        /* exit handlers */
        .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
        .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
        .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
        .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
        .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
        .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
        .handle_syscall = kvm_trap_emul_handle_syscall,
        .handle_res_inst = kvm_trap_emul_handle_res_inst,
        .handle_break = kvm_trap_emul_handle_break,
        .handle_trap = kvm_trap_emul_handle_trap,
        .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
        .handle_fpe = kvm_trap_emul_handle_fpe,
        .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
        .handle_guest_exit = kvm_trap_emul_no_handler,

        .hardware_enable = kvm_trap_emul_hardware_enable,
        .hardware_disable = kvm_trap_emul_hardware_disable,
        .check_extension = kvm_trap_emul_check_extension,
        .vcpu_init = kvm_trap_emul_vcpu_init,
        .vcpu_uninit = kvm_trap_emul_vcpu_uninit,
        .vcpu_setup = kvm_trap_emul_vcpu_setup,
        .flush_shadow_all = kvm_trap_emul_flush_shadow_all,
        .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
        .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
        .queue_timer_int = kvm_mips_queue_timer_int_cb,
        .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
        .queue_io_int = kvm_mips_queue_io_int_cb,
        .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
        .irq_deliver = kvm_mips_irq_deliver_cb,
        .irq_clear = kvm_mips_irq_clear_cb,
        .num_regs = kvm_trap_emul_num_regs,
        .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
        .get_one_reg = kvm_trap_emul_get_one_reg,
        .set_one_reg = kvm_trap_emul_set_one_reg,
        .vcpu_load = kvm_trap_emul_vcpu_load,
        .vcpu_put = kvm_trap_emul_vcpu_put,
        .vcpu_run = kvm_trap_emul_vcpu_run,
        .vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
        *install_callbacks = &kvm_trap_emul_callbacks;

        return 0;
}