/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"
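/*
 * Static GVA -> GPA translation used by trap & emulate: unmapped host
 * segments (CKSEG0/CKSEG1) strip the segment bits directly, and guest
 * KSEG0 strips the guest segment base. Other addresses have no fixed
 * GPA and are reported as invalid.
 */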
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	gva_t kseg = KSEGX(gva);
	gva_t gkseg = KVM_GUEST_KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else if (gkseg == KVM_GUEST_KSEG0)
		gpa = KVM_GUEST_CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}
static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		kvm_read_c0_guest_status(vcpu->arch.cop0));
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}
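/*
 * Coprocessor unusable: Cause.CE identifies the faulting coprocessor.
 * CE=1 (the FPU) either delivers a COP1 unusable exception to the guest
 * or restores the host FPU state; anything else is fed to the
 * instruction emulator.
 */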
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* FPU Unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	case EMULATE_HYPERCALL:
		ret = kvm_mips_handle_hypcall(vcpu);
		break;

	default:
		BUG();
	}
	return ret;
}
static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
			     struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* A code fetch fault doesn't count as an MMIO */
	if (kvm_is_ifetch_fault(&vcpu->arch)) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the load */
	er = kvm_mips_emulate_load(inst, cause, run, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate load from MMIO space failed\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}
static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
			      struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the store */
	er = kvm_mips_emulate_store(inst, cause, run, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate store to MMIO space failed\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}
static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
			       struct kvm_vcpu *vcpu, bool store)
{
	if (store)
		return kvm_mips_bad_store(cause, opc, run, vcpu);
	else
		return kvm_mips_bad_load(cause, opc, run, vcpu);
}
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	struct kvm_mips_tlb *tlb;
	unsigned long entryhi;
	int index;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		/*
		 * First find the mapping in the guest TLB. If the failure to
		 * write was due to the guest TLB, it should be up to the guest
		 * to handle it.
		 */
		entryhi = (badvaddr & VPN2_MASK) |
			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
		index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

		/*
		 * These should never happen.
		 * They would indicate stale host TLB entries.
		 */
		if (unlikely(index < 0)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		tlb = vcpu->arch.guest_tlb + index;
		if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/*
		 * Guest entry not dirty? That would explain the TLB modified
		 * exception. Relay that on to the guest so it can handle it.
		 */
		if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
			kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
			return RESUME_GUEST;
		}

		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
							 true))
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else {
		/* host kernel addresses are all handled as MMIO */
		return kvm_mips_bad_store(cause, opc, run, vcpu);
	}
}
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  store ? "ST" : "LD", cause, opc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */
		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
			ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		/*
		 * With EVA we may get a TLB exception instead of an address
		 * error when the guest performs MMIO to KSeg1 addresses.
		 */
		ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else {
		kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
			store ? "ST" : "LD", cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		ret = kvm_mips_bad_store(cause, opc, run, vcpu);
	} else {
		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		ret = kvm_mips_bad_load(cause, opc, run, vcpu);
	} else {
		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
		 * guest reserved instruction exception
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest, guest MSA disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
		er = EMULATE_DONE;
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}
static int kvm_trap_emul_hardware_enable(void)
{
	return 0;
}
static void kvm_trap_emul_hardware_disable(void)
{
}
static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_TE:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

	/*
	 * Allocate GVA -> HPA page tables.
	 * MIPS doesn't use the mm_struct pointer argument.
	 */
	kern_mm->pgd = pgd_alloc(kern_mm);
	if (!kern_mm->pgd)
		return -ENOMEM;

	user_mm->pgd = pgd_alloc(user_mm);
	if (!user_mm->pgd) {
		pgd_free(kern_mm, kern_mm->pgd);
		return -ENOMEM;
	}

	return 0;
}
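/*
 * The walk below covers only the user half of the GVA page tables
 * (below 0x80000000, as capped by 'end'); the kernel half was copied
 * from init_mm.pgd at allocation time and belongs to the host kernel.
 */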
static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
	/* Don't free host kernel page tables copied from init_mm.pgd */
	const unsigned long end = 0x80000000;
	unsigned long pgd_va, pud_va, pmd_va;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;

	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		if (pgd_none(pgd[i]))
			continue;

		pgd_va = (unsigned long)i << PGDIR_SHIFT;
		if (pgd_va >= end)
			break;
		pud = pud_offset(pgd + i, 0);
		for (j = 0; j < PTRS_PER_PUD; j++) {
			if (pud_none(pud[j]))
				continue;

			pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
			if (pud_va >= end)
				break;
			pmd = pmd_offset(pud + j, 0);
			for (k = 0; k < PTRS_PER_PMD; k++) {
				if (pmd_none(pmd[k]))
					continue;

				pmd_va = pud_va | (k << PMD_SHIFT);
				if (pmd_va >= end)
					break;
				pte = pte_offset(pmd + k, 0);
				pte_free_kernel(NULL, pte);
			}
		}
	}
	pgd_free(NULL, pgd);
}
static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/* Start off the timer at 100 MHz */
	kvm_mips_init_count(vcpu, 100*1000*1000);

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
	 */
#ifndef CONFIG_CPU_MIPSR6
	/* r2-r5, simulate a MIPS 24kc */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
	/* r6+, simulate a generic QEMU machine */
	kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
	/*
	 * Have config1, Cacheable, noncoherent, write-back, write allocate.
	 * Endianness, arch revision & virtually tagged icache should match
	 * host.
	 */
	config = read_c0_config() & MIPS_CONF_AR;
	config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
	config |= CONF_BE;
#endif
	if (cpu_has_vtag_icache)
		config |= MIPS_CONF_VI;
	kvm_write_c0_guest_config(cop0, config);

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* DCache line size not correctly reported in Config1 on Octeon CPUs */
	if (cpu_dcache_line_size()) {
		config1 &= ~MIPS_CONF1_DL;
		config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
			    MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
	}

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
		     MIPS_CONF1_WR | MIPS_CONF1_CA);
	kvm_write_c0_guest_config1(cop0, config1);

	/* Have config3, no tertiary/secondary caches implemented */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Have config4, UserLocal */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Have config5 */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* No config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/* Status */
	kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	/* Put PC at guest reset vector */
	vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);

	return 0;
}
static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
	/* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
	kvm_flush_remote_tlbs(kvm);
}
static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_trap_emul_flush_shadow_all(kvm);
}
static u64 kvm_trap_emul_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};
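/*
 * The IDs above are reported to userspace (via the KVM_GET_REG_LIST
 * ioctl path) by copy_reg_indices below, and each is accepted by the
 * get/set_one_reg callbacks that follow.
 */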
static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}
static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
					  u64 __user *indices)
{
	if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
			 sizeof(kvm_trap_emul_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

	return 0;
}
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = kvm_read_c0_guest_entrylo0(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = kvm_read_c0_guest_entrylo1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		*v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = (long)kvm_read_c0_guest_intctl(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = (long)kvm_read_c0_guest_ebase(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		*v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		*v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		*v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		*v = (long)kvm_read_c0_guest_config4(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		*v = (long)kvm_read_c0_guest_config5(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		*v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		*v = (long)kvm_read_c0_guest_kscratch1(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		*v = (long)kvm_read_c0_guest_kscratch2(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		*v = (long)kvm_read_c0_guest_kscratch3(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		*v = (long)kvm_read_c0_guest_kscratch4(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		*v = (long)kvm_read_c0_guest_kscratch5(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		*v = (long)kvm_read_c0_guest_kscratch6(cop0);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		kvm_write_c0_guest_entrylo0(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		kvm_write_c0_guest_entrylo1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		/* No VInt, so no VS, read-only for now */
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		/*
		 * Allow core number to be written, but the exception base must
		 * remain in guest KSeg0.
		 */
		kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
					  v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
							  v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
							  v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		/* writes ignored */
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		kvm_write_c0_guest_kscratch1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		kvm_write_c0_guest_kscratch2(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		kvm_write_c0_guest_kscratch3(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		kvm_write_c0_guest_kscratch4(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		kvm_write_c0_guest_kscratch5(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		kvm_write_c0_guest_kscratch6(cop0, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;

	/*
	 * Were we in guest context? If so, restore the appropriate ASID based
	 * on the mode of the Guest (Kernel/User).
	 */
	if (current->flags & PF_VCPU) {
		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
		    asid_version_mask(cpu))
			get_new_mmu_context(mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, mm));
		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
		kvm_mips_suspend_mm(cpu);

		ehb();
	}

	return 0;
}
static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_lose_fpu(vcpu);

	if (current->flags & PF_VCPU) {
		/* Restore normal Linux process memory map */
		if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
		     asid_version_mask(cpu)))
			get_new_mmu_context(current->mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, current->mm));
		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
		kvm_mips_resume_mm(cpu);

		ehb();
	}

	return 0;
}
static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
					 bool reload_asid)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	int i;

	if (likely(!kvm_request_pending(vcpu)))
		return;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/*
		 * Both kernel & user GVA mappings must be invalidated. The
		 * caller is just about to check whether the ASID is stale
		 * anyway so no need to reload it here.
		 */
		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
		kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
		for_each_possible_cpu(i) {
			cpu_context(i, kern_mm) = 0;
			cpu_context(i, user_mm) = 0;
		}

		/* Generate new ASID for current mode */
		if (reload_asid) {
			mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
			get_new_mmu_context(mm, cpu);
			htw_stop();
			write_c0_entryhi(cpu_asid(cpu, mm));
			TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
			htw_start();
		}
	}
}
/**
 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Call before a GVA space access outside of guest mode, to ensure that
 * asynchronous TLB flush requests are handled or delayed until completion of
 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
 *
 * Should be called with IRQs already enabled.
 */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
{
	/* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
	WARN_ON_ONCE(irqs_disabled());

	/*
	 * The caller is about to access the GVA space, so we set the mode to
	 * force TLB flush requests to send an IPI, and also disable IRQs to
	 * delay IPI handling until kvm_trap_emul_gva_lockless_end().
	 */
	local_irq_disable();

	/*
	 * Make sure the read of VCPU requests is not reordered ahead of the
	 * write to vcpu->mode, or we could miss a TLB flush request while
	 * the requester sees the VCPU as outside of guest mode and not needing
	 * a flush IPI.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);

	/*
	 * If a TLB flush has been requested (potentially while
	 * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
	 * before accessing the GVA space, and be sure to reload the ASID if
	 * necessary as it'll be immediately used.
	 *
	 * TLB flush requests after this check will trigger an IPI due to the
	 * mode change above, which will be delayed due to IRQs disabled.
	 */
	kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
}
/**
 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Called after a GVA space access outside of guest mode. Should have a matching
 * call to kvm_trap_emul_gva_lockless_begin().
 */
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of GVA
	 * accesses, or a TLB flush requester may not think it necessary to send
	 * an IPI.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);

	/*
	 * Now that the access to GVA space is complete, it's safe for pending
	 * TLB flush request IPIs to be handled (which indicates completion).
	 */
	local_irq_enable();
}
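/*
 * Illustrative usage of the begin/end pair above (a sketch; 'opc' and
 * 'inst' are hypothetical locals of a caller doing a guest-space read):
 *
 *	kvm_trap_emul_gva_lockless_begin(vcpu);
 *	err = get_user(inst, opc);
 *	kvm_trap_emul_gva_lockless_end(vcpu);
 */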
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
				       struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int i, cpu = smp_processor_id();
	unsigned int gasid;

	/*
	 * No need to reload ASID, IRQs are disabled already so there's no rush,
	 * and we'll check if we need to regenerate below anyway before
	 * re-entering the guest.
	 */
	kvm_trap_emul_check_requests(vcpu, cpu, false);

	if (KVM_GUEST_KERNEL_MODE(vcpu)) {
		mm = kern_mm;
	} else {
		mm = user_mm;

		/*
		 * Lazy host ASID regeneration / PT flush for guest user mode.
		 * If the guest ASID has changed since the last guest usermode
		 * execution, invalidate the stale TLB entries and flush GVA PT
		 * entries too.
		 */
		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
		if (gasid != vcpu->arch.last_user_gasid) {
			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
			for_each_possible_cpu(i)
				cpu_context(i, user_mm) = 0;
			vcpu->arch.last_user_gasid = gasid;
		}
	}

	/*
	 * Check if ASID is stale. This may happen due to a TLB flush request or
	 * a lazy user MM invalidation.
	 */
	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
	    asid_version_mask(cpu))
		get_new_mmu_context(mm, cpu);
}
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_trap_emul_vcpu_reenter(run, vcpu);

	/*
	 * We use user accessors to access guest memory, but we don't want to
	 * invoke Linux page faulting.
	 */
	pagefault_disable();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	/*
	 * While in guest context we're in the guest's address space, not the
	 * host process address space, so we need to be careful not to confuse
	 * e.g. cache management IPIs.
	 */
	kvm_mips_suspend_mm(cpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	/* We may have migrated while handling guest exits */
	cpu = smp_processor_id();

	/* Restore normal Linux process memory map */
	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu)))
		get_new_mmu_context(current->mm, cpu);
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
	kvm_mips_resume_mm(cpu);

	htw_start();

	pagefault_enable();

	return r;
}
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_emul_no_handler,

	.hardware_enable = kvm_trap_emul_hardware_enable,
	.hardware_disable = kvm_trap_emul_hardware_disable,
	.check_extension = kvm_trap_emul_check_extension,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.flush_shadow_all = kvm_trap_emul_flush_shadow_all,
	.flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.num_regs = kvm_trap_emul_num_regs,
	.copy_reg_indices = kvm_trap_emul_copy_reg_indices,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_load = kvm_trap_emul_vcpu_load,
	.vcpu_put = kvm_trap_emul_vcpu_put,
	.vcpu_run = kvm_trap_emul_vcpu_run,
	.vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}