/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/module.h>
15 #include <linux/vmalloc.h>
17 #include <linux/kvm_host.h>
20 #include "interrupt.h"
22 static gpa_t
kvm_trap_emul_gva_to_gpa_cb(gva_t gva
)
25 uint32_t kseg
= KSEGX(gva
);
27 if ((kseg
== CKSEG0
) || (kseg
== CKSEG1
))
30 kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__
, gva
);
31 kvm_mips_dump_host_tlbs();
32 gpa
= KVM_INVALID_ADDR
;
35 kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__
, gva
, gpa
);
40 static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu
*vcpu
)
42 struct mips_coproc
*cop0
= vcpu
->arch
.cop0
;
43 struct kvm_run
*run
= vcpu
->run
;
44 uint32_t __user
*opc
= (uint32_t __user
*) vcpu
->arch
.pc
;
45 unsigned long cause
= vcpu
->arch
.host_cp0_cause
;
46 enum emulation_result er
= EMULATE_DONE
;
47 int ret
= RESUME_GUEST
;
49 if (((cause
& CAUSEF_CE
) >> CAUSEB_CE
) == 1) {
51 if (!kvm_mips_guest_has_fpu(&vcpu
->arch
) ||
52 (kvm_read_c0_guest_status(cop0
) & ST0_CU1
) == 0) {
54 * Unusable/no FPU in guest:
55 * deliver guest COP1 Unusable Exception
57 er
= kvm_mips_emulate_fpu_exc(cause
, opc
, run
, vcpu
);
59 /* Restore FPU state */
64 er
= kvm_mips_emulate_inst(cause
, opc
, run
, vcpu
);
73 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
78 run
->exit_reason
= KVM_EXIT_INTR
;
88 static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu
*vcpu
)
90 struct kvm_run
*run
= vcpu
->run
;
91 uint32_t __user
*opc
= (uint32_t __user
*) vcpu
->arch
.pc
;
92 unsigned long badvaddr
= vcpu
->arch
.host_cp0_badvaddr
;
93 unsigned long cause
= vcpu
->arch
.host_cp0_cause
;
94 enum emulation_result er
= EMULATE_DONE
;
95 int ret
= RESUME_GUEST
;
97 if (KVM_GUEST_KSEGX(badvaddr
) < KVM_GUEST_KSEG0
98 || KVM_GUEST_KSEGX(badvaddr
) == KVM_GUEST_KSEG23
) {
99 kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
100 cause
, opc
, badvaddr
);
101 er
= kvm_mips_handle_tlbmod(cause
, opc
, run
, vcpu
);
103 if (er
== EMULATE_DONE
)
106 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
109 } else if (KVM_GUEST_KSEGX(badvaddr
) == KVM_GUEST_KSEG0
) {
111 * XXXKYMA: The guest kernel does not expect to get this fault
112 * when we are not using HIGHMEM. Need to address this in a
115 kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
116 cause
, opc
, badvaddr
);
117 kvm_mips_dump_host_tlbs();
118 kvm_arch_vcpu_dump_regs(vcpu
);
119 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
122 kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
123 cause
, opc
, badvaddr
);
124 kvm_mips_dump_host_tlbs();
125 kvm_arch_vcpu_dump_regs(vcpu
);
126 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
132 static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu
*vcpu
)
134 struct kvm_run
*run
= vcpu
->run
;
135 uint32_t __user
*opc
= (uint32_t __user
*) vcpu
->arch
.pc
;
136 unsigned long badvaddr
= vcpu
->arch
.host_cp0_badvaddr
;
137 unsigned long cause
= vcpu
->arch
.host_cp0_cause
;
138 enum emulation_result er
= EMULATE_DONE
;
139 int ret
= RESUME_GUEST
;
141 if (((badvaddr
& PAGE_MASK
) == KVM_GUEST_COMMPAGE_ADDR
)
142 && KVM_GUEST_KERNEL_MODE(vcpu
)) {
143 if (kvm_mips_handle_commpage_tlb_fault(badvaddr
, vcpu
) < 0) {
144 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
147 } else if (KVM_GUEST_KSEGX(badvaddr
) < KVM_GUEST_KSEG0
148 || KVM_GUEST_KSEGX(badvaddr
) == KVM_GUEST_KSEG23
) {
149 kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
150 cause
, opc
, badvaddr
);
151 er
= kvm_mips_handle_tlbmiss(cause
, opc
, run
, vcpu
);
152 if (er
== EMULATE_DONE
)
155 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
158 } else if (KVM_GUEST_KSEGX(badvaddr
) == KVM_GUEST_KSEG0
) {
160 * All KSEG0 faults are handled by KVM, as the guest kernel does
161 * not expect to ever get them
163 if (kvm_mips_handle_kseg0_tlb_fault
164 (vcpu
->arch
.host_cp0_badvaddr
, vcpu
) < 0) {
165 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
169 kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
170 cause
, opc
, badvaddr
);
171 kvm_mips_dump_host_tlbs();
172 kvm_arch_vcpu_dump_regs(vcpu
);
173 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
179 static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu
*vcpu
)
181 struct kvm_run
*run
= vcpu
->run
;
182 uint32_t __user
*opc
= (uint32_t __user
*) vcpu
->arch
.pc
;
183 unsigned long badvaddr
= vcpu
->arch
.host_cp0_badvaddr
;
184 unsigned long cause
= vcpu
->arch
.host_cp0_cause
;
185 enum emulation_result er
= EMULATE_DONE
;
186 int ret
= RESUME_GUEST
;
188 if (((badvaddr
& PAGE_MASK
) == KVM_GUEST_COMMPAGE_ADDR
)
189 && KVM_GUEST_KERNEL_MODE(vcpu
)) {
190 if (kvm_mips_handle_commpage_tlb_fault(badvaddr
, vcpu
) < 0) {
191 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
194 } else if (KVM_GUEST_KSEGX(badvaddr
) < KVM_GUEST_KSEG0
195 || KVM_GUEST_KSEGX(badvaddr
) == KVM_GUEST_KSEG23
) {
196 kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
197 vcpu
->arch
.pc
, badvaddr
);
200 * User Address (UA) fault, this could happen if
201 * (1) TLB entry not present/valid in both Guest and shadow host
202 * TLBs, in this case we pass on the fault to the guest
203 * kernel and let it handle it.
204 * (2) TLB entry is present in the Guest TLB but not in the
205 * shadow, in this case we inject the TLB from the Guest TLB
206 * into the shadow host TLB
209 er
= kvm_mips_handle_tlbmiss(cause
, opc
, run
, vcpu
);
210 if (er
== EMULATE_DONE
)
213 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
216 } else if (KVM_GUEST_KSEGX(badvaddr
) == KVM_GUEST_KSEG0
) {
217 if (kvm_mips_handle_kseg0_tlb_fault
218 (vcpu
->arch
.host_cp0_badvaddr
, vcpu
) < 0) {
219 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
223 kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
224 cause
, opc
, badvaddr
);
225 kvm_mips_dump_host_tlbs();
226 kvm_arch_vcpu_dump_regs(vcpu
);
227 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
233 static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu
*vcpu
)
235 struct kvm_run
*run
= vcpu
->run
;
236 uint32_t __user
*opc
= (uint32_t __user
*) vcpu
->arch
.pc
;
237 unsigned long badvaddr
= vcpu
->arch
.host_cp0_badvaddr
;
238 unsigned long cause
= vcpu
->arch
.host_cp0_cause
;
239 enum emulation_result er
= EMULATE_DONE
;
240 int ret
= RESUME_GUEST
;
242 if (KVM_GUEST_KERNEL_MODE(vcpu
)
243 && (KSEGX(badvaddr
) == CKSEG0
|| KSEGX(badvaddr
) == CKSEG1
)) {
244 kvm_debug("Emulate Store to MMIO space\n");
245 er
= kvm_mips_emulate_inst(cause
, opc
, run
, vcpu
);
246 if (er
== EMULATE_FAIL
) {
247 kvm_err("Emulate Store to MMIO space failed\n");
248 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
251 run
->exit_reason
= KVM_EXIT_MMIO
;
255 kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
256 cause
, opc
, badvaddr
);
257 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
263 static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu
*vcpu
)
265 struct kvm_run
*run
= vcpu
->run
;
266 uint32_t __user
*opc
= (uint32_t __user
*) vcpu
->arch
.pc
;
267 unsigned long badvaddr
= vcpu
->arch
.host_cp0_badvaddr
;
268 unsigned long cause
= vcpu
->arch
.host_cp0_cause
;
269 enum emulation_result er
= EMULATE_DONE
;
270 int ret
= RESUME_GUEST
;
272 if (KSEGX(badvaddr
) == CKSEG0
|| KSEGX(badvaddr
) == CKSEG1
) {
273 kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr
);
274 er
= kvm_mips_emulate_inst(cause
, opc
, run
, vcpu
);
275 if (er
== EMULATE_FAIL
) {
276 kvm_err("Emulate Load from MMIO space failed\n");
277 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
280 run
->exit_reason
= KVM_EXIT_MMIO
;
284 kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
285 cause
, opc
, badvaddr
);
286 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
293 static int kvm_trap_emul_handle_syscall(struct kvm_vcpu
*vcpu
)
295 struct kvm_run
*run
= vcpu
->run
;
296 uint32_t __user
*opc
= (uint32_t __user
*) vcpu
->arch
.pc
;
297 unsigned long cause
= vcpu
->arch
.host_cp0_cause
;
298 enum emulation_result er
= EMULATE_DONE
;
299 int ret
= RESUME_GUEST
;
301 er
= kvm_mips_emulate_syscall(cause
, opc
, run
, vcpu
);
302 if (er
== EMULATE_DONE
)
305 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
311 static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu
*vcpu
)
313 struct kvm_run
*run
= vcpu
->run
;
314 uint32_t __user
*opc
= (uint32_t __user
*) vcpu
->arch
.pc
;
315 unsigned long cause
= vcpu
->arch
.host_cp0_cause
;
316 enum emulation_result er
= EMULATE_DONE
;
317 int ret
= RESUME_GUEST
;
319 er
= kvm_mips_handle_ri(cause
, opc
, run
, vcpu
);
320 if (er
== EMULATE_DONE
)
323 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
329 static int kvm_trap_emul_handle_break(struct kvm_vcpu
*vcpu
)
331 struct kvm_run
*run
= vcpu
->run
;
332 uint32_t __user
*opc
= (uint32_t __user
*) vcpu
->arch
.pc
;
333 unsigned long cause
= vcpu
->arch
.host_cp0_cause
;
334 enum emulation_result er
= EMULATE_DONE
;
335 int ret
= RESUME_GUEST
;
337 er
= kvm_mips_emulate_bp_exc(cause
, opc
, run
, vcpu
);
338 if (er
== EMULATE_DONE
)
341 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
347 static int kvm_trap_emul_handle_trap(struct kvm_vcpu
*vcpu
)
349 struct kvm_run
*run
= vcpu
->run
;
350 uint32_t __user
*opc
= (uint32_t __user
*)vcpu
->arch
.pc
;
351 unsigned long cause
= vcpu
->arch
.host_cp0_cause
;
352 enum emulation_result er
= EMULATE_DONE
;
353 int ret
= RESUME_GUEST
;
355 er
= kvm_mips_emulate_trap_exc(cause
, opc
, run
, vcpu
);
356 if (er
== EMULATE_DONE
) {
359 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
365 static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu
*vcpu
)
367 struct kvm_run
*run
= vcpu
->run
;
368 uint32_t __user
*opc
= (uint32_t __user
*)vcpu
->arch
.pc
;
369 unsigned long cause
= vcpu
->arch
.host_cp0_cause
;
370 enum emulation_result er
= EMULATE_DONE
;
371 int ret
= RESUME_GUEST
;
373 er
= kvm_mips_emulate_msafpe_exc(cause
, opc
, run
, vcpu
);
374 if (er
== EMULATE_DONE
) {
377 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
383 static int kvm_trap_emul_handle_fpe(struct kvm_vcpu
*vcpu
)
385 struct kvm_run
*run
= vcpu
->run
;
386 uint32_t __user
*opc
= (uint32_t __user
*)vcpu
->arch
.pc
;
387 unsigned long cause
= vcpu
->arch
.host_cp0_cause
;
388 enum emulation_result er
= EMULATE_DONE
;
389 int ret
= RESUME_GUEST
;
391 er
= kvm_mips_emulate_fpe_exc(cause
, opc
, run
, vcpu
);
392 if (er
== EMULATE_DONE
) {
395 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
402 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
403 * @vcpu: Virtual CPU context.
405 * Handle when the guest attempts to use MSA when it is disabled.
407 static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu
*vcpu
)
409 struct mips_coproc
*cop0
= vcpu
->arch
.cop0
;
410 struct kvm_run
*run
= vcpu
->run
;
411 uint32_t __user
*opc
= (uint32_t __user
*) vcpu
->arch
.pc
;
412 unsigned long cause
= vcpu
->arch
.host_cp0_cause
;
413 enum emulation_result er
= EMULATE_DONE
;
414 int ret
= RESUME_GUEST
;
416 if (!kvm_mips_guest_has_msa(&vcpu
->arch
) ||
417 (kvm_read_c0_guest_status(cop0
) & (ST0_CU1
| ST0_FR
)) == ST0_CU1
) {
419 * No MSA in guest, or FPU enabled and not in FR=1 mode,
420 * guest reserved instruction exception
422 er
= kvm_mips_emulate_ri_exc(cause
, opc
, run
, vcpu
);
423 } else if (!(kvm_read_c0_guest_config5(cop0
) & MIPS_CONF5_MSAEN
)) {
424 /* MSA disabled by guest, guest MSA disabled exception */
425 er
= kvm_mips_emulate_msadis_exc(cause
, opc
, run
, vcpu
);
427 /* Restore MSA/FPU state */
438 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
/*
 * VM init callback: trap & emulate has no VM-wide state to set up.
 * NOTE(review): body not visible in this chunk — reconstructed as the
 * trivial success stub; confirm against the full file.
 */
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}
/*
 * VCPU init callback: trap & emulate has no per-VCPU state to set up here.
 * NOTE(review): body not visible in this chunk — reconstructed as the
 * trivial success stub; confirm against the full file.
 */
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
458 static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu
*vcpu
)
460 struct mips_coproc
*cop0
= vcpu
->arch
.cop0
;
462 int vcpu_id
= vcpu
->vcpu_id
;
465 * Arch specific stuff, set up config registers properly so that the
466 * guest will come up as expected, for now we simulate a MIPS 24kc
468 kvm_write_c0_guest_prid(cop0
, 0x00019300);
469 /* Have config1, Cacheable, noncoherent, write-back, write allocate */
470 kvm_write_c0_guest_config(cop0
, MIPS_CONF_M
| (0x3 << CP0C0_K0
) |
472 (MMU_TYPE_R4000
<< CP0C0_MT
));
474 /* Read the cache characteristics from the host Config1 Register */
475 config1
= (read_c0_config1() & ~0x7f);
477 /* Set up MMU size */
478 config1
&= ~(0x3f << 25);
479 config1
|= ((KVM_MIPS_GUEST_TLB_SIZE
- 1) << 25);
481 /* We unset some bits that we aren't emulating */
483 ~((1 << CP0C1_C2
) | (1 << CP0C1_MD
) | (1 << CP0C1_PC
) |
484 (1 << CP0C1_WR
) | (1 << CP0C1_CA
));
485 kvm_write_c0_guest_config1(cop0
, config1
);
487 /* Have config3, no tertiary/secondary caches implemented */
488 kvm_write_c0_guest_config2(cop0
, MIPS_CONF_M
);
489 /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */
491 /* Have config4, UserLocal */
492 kvm_write_c0_guest_config3(cop0
, MIPS_CONF_M
| MIPS_CONF3_ULRI
);
495 kvm_write_c0_guest_config4(cop0
, MIPS_CONF_M
);
498 kvm_write_c0_guest_config5(cop0
, 0);
500 /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
501 kvm_write_c0_guest_config7(cop0
, (MIPS_CONF7_WII
) | (1 << 10));
504 * Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5)
506 kvm_write_c0_guest_intctl(cop0
, 0xFC000000);
508 /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
509 kvm_write_c0_guest_ebase(cop0
, KVM_GUEST_KSEG0
| (vcpu_id
& 0xFF));
514 static int kvm_trap_emul_get_one_reg(struct kvm_vcpu
*vcpu
,
515 const struct kvm_one_reg
*reg
,
519 case KVM_REG_MIPS_CP0_COUNT
:
520 *v
= kvm_mips_read_count(vcpu
);
522 case KVM_REG_MIPS_COUNT_CTL
:
523 *v
= vcpu
->arch
.count_ctl
;
525 case KVM_REG_MIPS_COUNT_RESUME
:
526 *v
= ktime_to_ns(vcpu
->arch
.count_resume
);
528 case KVM_REG_MIPS_COUNT_HZ
:
529 *v
= vcpu
->arch
.count_hz
;
537 static int kvm_trap_emul_set_one_reg(struct kvm_vcpu
*vcpu
,
538 const struct kvm_one_reg
*reg
,
541 struct mips_coproc
*cop0
= vcpu
->arch
.cop0
;
543 unsigned int cur
, change
;
546 case KVM_REG_MIPS_CP0_COUNT
:
547 kvm_mips_write_count(vcpu
, v
);
549 case KVM_REG_MIPS_CP0_COMPARE
:
550 kvm_mips_write_compare(vcpu
, v
);
552 case KVM_REG_MIPS_CP0_CAUSE
:
554 * If the timer is stopped or started (DC bit) it must look
555 * atomic with changes to the interrupt pending bits (TI, IRQ5).
556 * A timer interrupt should not happen in between.
558 if ((kvm_read_c0_guest_cause(cop0
) ^ v
) & CAUSEF_DC
) {
560 /* disable timer first */
561 kvm_mips_count_disable_cause(vcpu
);
562 kvm_change_c0_guest_cause(cop0
, ~CAUSEF_DC
, v
);
564 /* enable timer last */
565 kvm_change_c0_guest_cause(cop0
, ~CAUSEF_DC
, v
);
566 kvm_mips_count_enable_cause(vcpu
);
569 kvm_write_c0_guest_cause(cop0
, v
);
572 case KVM_REG_MIPS_CP0_CONFIG
:
573 /* read-only for now */
575 case KVM_REG_MIPS_CP0_CONFIG1
:
576 cur
= kvm_read_c0_guest_config1(cop0
);
577 change
= (cur
^ v
) & kvm_mips_config1_wrmask(vcpu
);
580 kvm_write_c0_guest_config1(cop0
, v
);
583 case KVM_REG_MIPS_CP0_CONFIG2
:
584 /* read-only for now */
586 case KVM_REG_MIPS_CP0_CONFIG3
:
587 cur
= kvm_read_c0_guest_config3(cop0
);
588 change
= (cur
^ v
) & kvm_mips_config3_wrmask(vcpu
);
591 kvm_write_c0_guest_config3(cop0
, v
);
594 case KVM_REG_MIPS_CP0_CONFIG4
:
595 cur
= kvm_read_c0_guest_config4(cop0
);
596 change
= (cur
^ v
) & kvm_mips_config4_wrmask(vcpu
);
599 kvm_write_c0_guest_config4(cop0
, v
);
602 case KVM_REG_MIPS_CP0_CONFIG5
:
603 cur
= kvm_read_c0_guest_config5(cop0
);
604 change
= (cur
^ v
) & kvm_mips_config5_wrmask(vcpu
);
607 kvm_write_c0_guest_config5(cop0
, v
);
610 case KVM_REG_MIPS_COUNT_CTL
:
611 ret
= kvm_mips_set_count_ctl(vcpu
, v
);
613 case KVM_REG_MIPS_COUNT_RESUME
:
614 ret
= kvm_mips_set_count_resume(vcpu
, v
);
616 case KVM_REG_MIPS_COUNT_HZ
:
617 ret
= kvm_mips_set_count_hz(vcpu
, v
);
/*
 * Callback invoked before guest register state is read by userspace.
 * NOTE(review): body not visible in this chunk — reconstructed per the
 * contemporaneous upstream file, which drops FPU ownership here so the
 * in-memory register state is current; confirm against the full file.
 */
static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
{
	kvm_lose_fpu(vcpu);

	return 0;
}
/*
 * Callback invoked after guest register state is written by userspace.
 * NOTE(review): body not visible in this chunk — reconstructed as the
 * trivial success stub; confirm against the full file.
 */
static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}
637 static struct kvm_mips_callbacks kvm_trap_emul_callbacks
= {
639 .handle_cop_unusable
= kvm_trap_emul_handle_cop_unusable
,
640 .handle_tlb_mod
= kvm_trap_emul_handle_tlb_mod
,
641 .handle_tlb_st_miss
= kvm_trap_emul_handle_tlb_st_miss
,
642 .handle_tlb_ld_miss
= kvm_trap_emul_handle_tlb_ld_miss
,
643 .handle_addr_err_st
= kvm_trap_emul_handle_addr_err_st
,
644 .handle_addr_err_ld
= kvm_trap_emul_handle_addr_err_ld
,
645 .handle_syscall
= kvm_trap_emul_handle_syscall
,
646 .handle_res_inst
= kvm_trap_emul_handle_res_inst
,
647 .handle_break
= kvm_trap_emul_handle_break
,
648 .handle_trap
= kvm_trap_emul_handle_trap
,
649 .handle_msa_fpe
= kvm_trap_emul_handle_msa_fpe
,
650 .handle_fpe
= kvm_trap_emul_handle_fpe
,
651 .handle_msa_disabled
= kvm_trap_emul_handle_msa_disabled
,
653 .vm_init
= kvm_trap_emul_vm_init
,
654 .vcpu_init
= kvm_trap_emul_vcpu_init
,
655 .vcpu_setup
= kvm_trap_emul_vcpu_setup
,
656 .gva_to_gpa
= kvm_trap_emul_gva_to_gpa_cb
,
657 .queue_timer_int
= kvm_mips_queue_timer_int_cb
,
658 .dequeue_timer_int
= kvm_mips_dequeue_timer_int_cb
,
659 .queue_io_int
= kvm_mips_queue_io_int_cb
,
660 .dequeue_io_int
= kvm_mips_dequeue_io_int_cb
,
661 .irq_deliver
= kvm_mips_irq_deliver_cb
,
662 .irq_clear
= kvm_mips_irq_clear_cb
,
663 .get_one_reg
= kvm_trap_emul_get_one_reg
,
664 .set_one_reg
= kvm_trap_emul_set_one_reg
,
665 .vcpu_get_regs
= kvm_trap_emul_vcpu_get_regs
,
666 .vcpu_set_regs
= kvm_trap_emul_vcpu_set_regs
,
669 int kvm_mips_emulation_init(struct kvm_mips_callbacks
**install_callbacks
)
671 *install_callbacks
= &kvm_trap_emul_callbacks
;