/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	gva_t kseg = KSEGX(gva);
	gva_t gkseg = KVM_GUEST_KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else if (gkseg == KVM_GUEST_KSEG0)
		gpa = KVM_GUEST_CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}
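/*
 * Catch-all exit handler: dump the offending instruction and VCPU state for
 * any guest exception that has no dedicated handler below, then bail out to
 * userspace with KVM_EXIT_INTERNAL_ERROR.
 */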
static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		kvm_read_c0_guest_status(vcpu->arch.cop0));
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;

	return RESUME_HOST;
}
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* Coprocessor 1 (FPU) unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		vcpu->run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	case EMULATE_HYPERCALL:
		ret = kvm_mips_handle_hypcall(vcpu);
		break;

	default:
		BUG();
	}

	return ret;
}
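/*
 * Helpers for guest accesses that KVM cannot map: fetch the faulting
 * instruction and hand it to the load/store emulator, turning the fault into
 * either a KVM_EXIT_MMIO exit for userspace or an internal error.
 */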
static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* A code fetch fault doesn't count as an MMIO */
	if (kvm_is_ifetch_fault(&vcpu->arch)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the load */
	er = kvm_mips_emulate_load(inst, cause, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate load from MMIO space failed\n");
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}
static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the store */
	er = kvm_mips_emulate_store(inst, cause, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate store to MMIO space failed\n");
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}
static int kvm_mips_bad_access(u32 cause, u32 *opc,
			       struct kvm_vcpu *vcpu, bool store)
{
	if (store)
		return kvm_mips_bad_store(cause, opc, vcpu);
	else
		return kvm_mips_bad_load(cause, opc, vcpu);
}
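/*
 * TLB modified exception: either relay it to the guest (entry not dirty in
 * the guest TLB), repopulate the shadow host TLB, or treat the write as MMIO.
 */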
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	struct kvm_mips_tlb *tlb;
	unsigned long entryhi;
	int index;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		/*
		 * First find the mapping in the guest TLB. If the failure to
		 * write was due to the guest TLB, it should be up to the guest
		 * to handle it.
		 */
		entryhi = (badvaddr & VPN2_MASK) |
			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
		index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

		/*
		 * These should never happen.
		 * They would indicate stale host TLB entries.
		 */
		if (unlikely(index < 0)) {
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		tlb = vcpu->arch.guest_tlb + index;
		if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/*
		 * Guest entry not dirty? That would explain the TLB modified
		 * exception. Relay that on to the guest so it can handle it.
		 */
		if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
			kvm_mips_emulate_tlbmod(cause, opc, vcpu);
			return RESUME_GUEST;
		}

		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
							 true))
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, vcpu);
		return RESUME_GUEST;
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, vcpu);
		return RESUME_GUEST;
	} else {
		/* host kernel addresses are all handled as MMIO */
		return kvm_mips_bad_store(cause, opc, vcpu);
	}
}
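/*
 * Common TLB load/store miss handling, shared by the st_miss/ld_miss entry
 * points below: commpage faults, guest user/KSeg2-3 faults relayed to the
 * guest, KSeg0 faults mapped by KVM, and (with EVA) KSeg0/1 MMIO accesses.
 */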
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  store ? "ST" : "LD", cause, opc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */

		er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
			ret = kvm_mips_bad_access(cause, opc, vcpu, store);
	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		/*
		 * With EVA we may get a TLB exception instead of an address
		 * error when the guest performs MMIO to KSeg1 addresses.
		 */
		ret = kvm_mips_bad_access(cause, opc, vcpu, store);
	} else {
		kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
			store ? "ST" : "LD", cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		ret = kvm_mips_bad_store(cause, opc, vcpu);
	} else {
		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		ret = kvm_mips_bad_load(cause, opc, vcpu);
	} else {
		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
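/*
 * The handlers below all follow the same pattern: emulate delivery of the
 * corresponding exception into the guest context and resume the guest, or
 * report an internal error to userspace if emulation fails.
 */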
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
		 * guest reserved instruction exception
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest, guest MSA disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
		er = EMULATE_DONE;
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}

	return ret;
}
static int kvm_trap_emul_hardware_enable(void)
{
	return 0;
}

static void kvm_trap_emul_hardware_disable(void)
{
}
static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_TE:
		r = 1;
		break;
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

	/*
	 * Allocate GVA -> HPA page tables.
	 * MIPS doesn't use the mm_struct pointer argument.
	 */
	kern_mm->pgd = pgd_alloc(kern_mm);
	if (!kern_mm->pgd)
		return -ENOMEM;

	user_mm->pgd = pgd_alloc(user_mm);
	if (!user_mm->pgd) {
		pgd_free(kern_mm, kern_mm->pgd);
		return -ENOMEM;
	}

	return 0;
}
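/*
 * Walk and free a GVA page table allocated by kvm_trap_emul_vcpu_init().
 * Only user-half entries (below the host kernel mapping at 0x80000000) are
 * freed; the kernel half is shared with init_mm and must be left alone.
 */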
static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
	/* Don't free host kernel page tables copied from init_mm.pgd */
	const unsigned long end = 0x80000000;
	unsigned long pgd_va, pud_va, pmd_va;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;

	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		if (pgd_none(pgd[i]))
			continue;

		pgd_va = (unsigned long)i << PGDIR_SHIFT;
		if (pgd_va >= end)
			break;
		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d + i, 0);
		for (j = 0; j < PTRS_PER_PUD; j++) {
			if (pud_none(pud[j]))
				continue;

			pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
			if (pud_va >= end)
				break;
			pmd = pmd_offset(pud + j, 0);
			for (k = 0; k < PTRS_PER_PMD; k++) {
				if (pmd_none(pmd[k]))
					continue;

				pmd_va = pud_va | (k << PMD_SHIFT);
				if (pmd_va >= end)
					break;
				pte = pte_offset_kernel(pmd + k, 0);
				pte_free_kernel(NULL, pte);
			}
		}
	}
	pgd_free(NULL, pgd);
}
static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}
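/*
 * Initial guest CP0 state: PRId, Config0-7, Status, IntCtl and EBase are set
 * up so the guest sees a simple MIPS32 CPU (a 24Kc on pre-R6 hosts), and the
 * PC is placed at the usual reset vector in guest KSeg1.
 */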
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/* Start off the timer at 100 MHz */
	kvm_mips_init_count(vcpu, 100*1000*1000);

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
	 */
#ifndef CONFIG_CPU_MIPSR6
	/* r2-r5, simulate a MIPS 24kc */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
	/* r6+, simulate a generic QEMU machine */
	kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
	/*
	 * Have config1, Cacheable, noncoherent, write-back, write allocate.
	 * Endianness, arch revision & virtually tagged icache should match
	 * host.
	 */
	config = read_c0_config() & MIPS_CONF_AR;
	config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
	config |= CONF_BE;
#endif
	if (cpu_has_vtag_icache)
		config |= MIPS_CONF_VI;
	kvm_write_c0_guest_config(cop0, config);

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* DCache line size not correctly reported in Config1 on Octeon CPUs */
	if (cpu_dcache_line_size()) {
		config1 &= ~MIPS_CONF1_DL;
		config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
			    MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
	}

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
		     MIPS_CONF1_WR | MIPS_CONF1_CA);
	kvm_write_c0_guest_config1(cop0, config1);

	/* Have config3, no tertiary/secondary caches implemented */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Have config4, UserLocal */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Have config5 */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* No config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	/* Put PC at guest reset vector */
	vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);

	return 0;
}
static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
	/* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_trap_emul_flush_shadow_all(kvm);
}
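/*
 * CP0 and timer registers exposed through the KVM_GET/SET_ONE_REG ioctls;
 * kvm_trap_emul_copy_reg_indices() reports this list to userspace and
 * kvm_trap_emul_{get,set}_one_reg() implement the accesses.
 */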
static u64 kvm_trap_emul_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};
static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
					   u64 __user *indices)
{
	if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
			 sizeof(kvm_trap_emul_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

	return 0;
}
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = kvm_read_c0_guest_entrylo0(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = kvm_read_c0_guest_entrylo1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		*v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = (long)kvm_read_c0_guest_intctl(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = (long)kvm_read_c0_guest_ebase(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		*v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		*v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		*v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		*v = (long)kvm_read_c0_guest_config4(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		*v = (long)kvm_read_c0_guest_config5(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		*v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		*v = (long)kvm_read_c0_guest_kscratch1(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		*v = (long)kvm_read_c0_guest_kscratch2(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		*v = (long)kvm_read_c0_guest_kscratch3(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		*v = (long)kvm_read_c0_guest_kscratch4(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		*v = (long)kvm_read_c0_guest_kscratch5(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		*v = (long)kvm_read_c0_guest_kscratch6(cop0);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		kvm_write_c0_guest_entrylo0(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		kvm_write_c0_guest_entrylo1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		/* No VInt, so no VS, read-only for now */
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		/*
		 * Allow core number to be written, but the exception base must
		 * remain in guest KSeg0.
		 */
		kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
					  v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
							  v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
							  v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		/* writes ignored */
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		kvm_write_c0_guest_kscratch1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		kvm_write_c0_guest_kscratch2(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		kvm_write_c0_guest_kscratch3(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		kvm_write_c0_guest_kscratch4(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		kvm_write_c0_guest_kscratch5(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		kvm_write_c0_guest_kscratch6(cop0, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
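/*
 * vcpu_load/vcpu_put: when preempted or migrated while inside guest context
 * (PF_VCPU set), switch between the guest GVA address space and the normal
 * host process address space on the current CPU.
 */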
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;

	/*
	 * Were we in guest context? If so, restore the appropriate ASID based
	 * on the mode of the Guest (Kernel/User).
	 */
	if (current->flags & PF_VCPU) {
		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
		check_switch_mmu_context(mm);
		kvm_mips_suspend_mm(cpu);
		ehb();
	}

	return 0;
}
static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_lose_fpu(vcpu);

	if (current->flags & PF_VCPU) {
		/* Restore normal Linux process memory map */
		check_switch_mmu_context(current->mm);
		kvm_mips_resume_mm(cpu);
		ehb();
	}

	return 0;
}
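/*
 * Handle pending VCPU requests before (re)entering the guest. Currently only
 * KVM_REQ_TLB_FLUSH is handled: flush both GVA page tables, invalidate the
 * per-CPU ASIDs, and optionally regenerate the ASID for the current mode.
 */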
static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
					 bool reload_asid)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	int i;

	if (likely(!kvm_request_pending(vcpu)))
		return;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/*
		 * Both kernel & user GVA mappings must be invalidated. The
		 * caller is just about to check whether the ASID is stale
		 * anyway so no need to reload it here.
		 */
		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
		kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
		for_each_possible_cpu(i) {
			set_cpu_context(i, kern_mm, 0);
			set_cpu_context(i, user_mm, 0);
		}

		/* Generate new ASID for current mode */
		if (reload_asid) {
			mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
			get_new_mmu_context(mm);
			write_c0_entryhi(cpu_asid(cpu, mm));
			TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
		}
	}
}
/**
 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Call before a GVA space access outside of guest mode, to ensure that
 * asynchronous TLB flush requests are handled or delayed until completion of
 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
 *
 * Should be called with IRQs already enabled.
 */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
{
	/* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
	WARN_ON_ONCE(irqs_disabled());

	/*
	 * The caller is about to access the GVA space, so we set the mode to
	 * force TLB flush requests to send an IPI, and also disable IRQs to
	 * delay IPI handling until kvm_trap_emul_gva_lockless_end().
	 */
	local_irq_disable();

	/*
	 * Make sure the read of VCPU requests is not reordered ahead of the
	 * write to vcpu->mode, or we could miss a TLB flush request while
	 * the requester sees the VCPU as outside of guest mode and not needing
	 * a flush IPI.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);

	/*
	 * If a TLB flush has been requested (potentially while
	 * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
	 * before accessing the GVA space, and be sure to reload the ASID if
	 * necessary as it'll be immediately used.
	 *
	 * TLB flush requests after this check will trigger an IPI due to the
	 * mode change above, which will be delayed due to IRQs disabled.
	 */
	kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
}
/**
 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Called after a GVA space access outside of guest mode. Should have a matching
 * call to kvm_trap_emul_gva_lockless_begin().
 */
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of GVA
	 * accesses, or a TLB flush requester may not think it necessary to send
	 * an IPI.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);

	/*
	 * Now that the access to GVA space is complete, it's safe for pending
	 * TLB flush request IPIs to be handled (which indicates completion).
	 */
	local_irq_enable();
}
static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int i, cpu = smp_processor_id();
	unsigned int gasid;

	/*
	 * No need to reload ASID, IRQs are disabled already so there's no rush,
	 * and we'll check if we need to regenerate below anyway before
	 * re-entering the guest.
	 */
	kvm_trap_emul_check_requests(vcpu, cpu, false);

	if (KVM_GUEST_KERNEL_MODE(vcpu)) {
		mm = kern_mm;
	} else {
		mm = user_mm;

		/*
		 * Lazy host ASID regeneration / PT flush for guest user mode.
		 * If the guest ASID has changed since the last guest usermode
		 * execution, invalidate the stale TLB entries and flush GVA PT
		 * entries too.
		 */
		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
		if (gasid != vcpu->arch.last_user_gasid) {
			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
			for_each_possible_cpu(i)
				set_cpu_context(i, user_mm, 0);
			vcpu->arch.last_user_gasid = gasid;
		}
	}

	/*
	 * Check if ASID is stale. This may happen due to a TLB flush request or
	 * a lazy user MM invalidation.
	 */
	check_mmu_context(mm);
}
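/*
 * Outer run loop for trap & emulate: deliver pending interrupts, switch to
 * the guest GVA address space with page faulting and the hardware page table
 * walker disabled, run the guest, then restore the host memory map.
 */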
static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_trap_emul_vcpu_reenter(vcpu);

	/*
	 * We use user accessors to access guest memory, but we don't want to
	 * invoke Linux page faulting.
	 */
	pagefault_disable();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	/*
	 * While in guest context we're in the guest's address space, not the
	 * host process address space, so we need to be careful not to confuse
	 * e.g. cache management IPIs.
	 */
	kvm_mips_suspend_mm(cpu);

	r = vcpu->arch.vcpu_run(vcpu);

	/* We may have migrated while handling guest exits */
	cpu = smp_processor_id();

	/* Restore normal Linux process memory map */
	check_switch_mmu_context(current->mm);
	kvm_mips_resume_mm(cpu);

	htw_start();

	pagefault_enable();

	return r;
}
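/*
 * Callback table wiring the trap & emulate implementation into the generic
 * MIPS KVM core; kvm_mips_emulation_init() installs it at initialization time.
 */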
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_emul_no_handler,

	.hardware_enable = kvm_trap_emul_hardware_enable,
	.hardware_disable = kvm_trap_emul_hardware_disable,
	.check_extension = kvm_trap_emul_check_extension,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.flush_shadow_all = kvm_trap_emul_flush_shadow_all,
	.flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.num_regs = kvm_trap_emul_num_regs,
	.copy_reg_indices = kvm_trap_emul_copy_reg_indices,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_load = kvm_trap_emul_vcpu_load,
	.vcpu_put = kvm_trap_emul_vcpu_put,
	.vcpu_run = kvm_trap_emul_vcpu_run,
	.vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;

	return 0;
}