Linux 5.7.6
arch/mips/kvm/trap_emul.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"

static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
        gpa_t gpa;
        gva_t kseg = KSEGX(gva);
        gva_t gkseg = KVM_GUEST_KSEGX(gva);

        if ((kseg == CKSEG0) || (kseg == CKSEG1))
                gpa = CPHYSADDR(gva);
        else if (gkseg == KVM_GUEST_KSEG0)
                gpa = KVM_GUEST_CPHYSADDR(gva);
        else {
                kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
                kvm_mips_dump_host_tlbs();
                gpa = KVM_INVALID_ADDR;
        }

        kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

        return gpa;
}

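/*
 * Illustrative sketch (not in the original file): how the segment-based
 * translation above works out for a couple of addresses. The exact
 * KVM_GUEST_KSEG0 base is an assumption here (0x40000000 in the
 * trap-and-emulate layout, where the guest kernel runs in the user
 * address space).
 *
 *      CPHYSADDR(0x80001000)           == 0x00001000   (host CKSEG0)
 *      KVM_GUEST_CPHYSADDR(0x40001000) == 0x00001000   (guest KSEG0)
 *
 * Anything outside those unmapped segments has no fixed GPA and yields
 * KVM_INVALID_ADDR.
 */
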
static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
{
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 inst = 0;

        /*
         * Fetch the instruction.
         */
        if (cause & CAUSEF_BD)
                opc += 1;
        kvm_get_badinstr(opc, vcpu, &inst);

        kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
                exccode, opc, inst, badvaddr,
                kvm_read_c0_guest_status(vcpu->arch.cop0));
        kvm_arch_vcpu_dump_regs(vcpu);
        vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        return RESUME_HOST;
}

static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
                /* FPU Unusable */
                if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
                    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
                        /*
                         * Unusable/no FPU in guest:
                         * deliver guest COP1 Unusable Exception
                         */
                        er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
                } else {
                        /* Restore FPU state */
                        kvm_own_fpu(vcpu);
                        er = EMULATE_DONE;
                }
        } else {
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        case EMULATE_WAIT:
                run->exit_reason = KVM_EXIT_INTR;
                ret = RESUME_HOST;
                break;

        case EMULATE_HYPERCALL:
                ret = kvm_mips_handle_hypcall(vcpu);
                break;

        default:
                BUG();
        }
        return ret;
}

static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
                             struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        union mips_instruction inst;
        int err;

        /* A code fetch fault doesn't count as an MMIO */
        if (kvm_is_ifetch_fault(&vcpu->arch)) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Fetch the instruction. */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Emulate the load */
        er = kvm_mips_emulate_load(inst, cause, run, vcpu);
        if (er == EMULATE_FAIL) {
                kvm_err("Emulate load from MMIO space failed\n");
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        } else {
                run->exit_reason = KVM_EXIT_MMIO;
        }
        return RESUME_HOST;
}

static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
                              struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        union mips_instruction inst;
        int err;

        /* Fetch the instruction. */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Emulate the store */
        er = kvm_mips_emulate_store(inst, cause, run, vcpu);
        if (er == EMULATE_FAIL) {
                kvm_err("Emulate store to MMIO space failed\n");
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        } else {
                run->exit_reason = KVM_EXIT_MMIO;
        }
        return RESUME_HOST;
}

static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
                               struct kvm_vcpu *vcpu, bool store)
{
        if (store)
                return kvm_mips_bad_store(cause, opc, run, vcpu);
        else
                return kvm_mips_bad_load(cause, opc, run, vcpu);
}

static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        struct kvm_mips_tlb *tlb;
        unsigned long entryhi;
        int index;

        if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                /*
                 * First find the mapping in the guest TLB. If the failure to
                 * write was due to the guest TLB, it should be up to the guest
                 * to handle it.
                 */
                entryhi = (badvaddr & VPN2_MASK) |
                          (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
                index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

                /*
                 * These should never happen.
                 * They would indicate stale host TLB entries.
                 */
                if (unlikely(index < 0)) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }
                tlb = vcpu->arch.guest_tlb + index;
                if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }

                /*
                 * Guest entry not dirty? That would explain the TLB modified
                 * exception. Relay that on to the guest so it can handle it.
                 */
                if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
                        kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
                        return RESUME_GUEST;
                }

                if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
                                                         true))
                        /* Not writable, needs handling as MMIO */
                        return kvm_mips_bad_store(cause, opc, run, vcpu);
                return RESUME_GUEST;
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
                        /* Not writable, needs handling as MMIO */
                        return kvm_mips_bad_store(cause, opc, run, vcpu);
                return RESUME_GUEST;
        } else {
                /* host kernel addresses are all handled as MMIO */
                return kvm_mips_bad_store(cause, opc, run, vcpu);
        }
}

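/*
 * Worked example (illustrative, not in the original file): how the guest
 * TLB probe key is built above, assuming 4 KiB pages so VPN2_MASK clears
 * the low 13 bits (page offset plus the even/odd page-select bit):
 *
 *      badvaddr = 0x00401234, guest EntryHi ASID = 0x05
 *      entryhi  = (0x00401234 & VPN2_MASK) | 0x05
 *               = 0x00400000 | 0x05 = 0x00400005
 *
 * kvm_mips_guest_tlb_lookup() then probes the virtual guest TLB for that
 * VPN2/ASID pair, mirroring what a hardware TLBP would do in the guest.
 */
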
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          store ? "ST" : "LD", cause, opc, badvaddr);

                /*
                 * User Address (UA) fault. This could happen if
                 * (1) TLB entry not present/valid in both Guest and shadow host
                 *     TLBs; in this case we pass on the fault to the guest
                 *     kernel and let it handle it.
                 * (2) TLB entry is present in the Guest TLB but not in the
                 *     shadow; in this case we inject the TLB from the Guest TLB
                 *     into the shadow host TLB.
                 */

                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * All KSEG0 faults are handled by KVM, as the guest kernel does
                 * not expect to ever get them
                 */
                if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
                        ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
        } else if (KVM_GUEST_KERNEL_MODE(vcpu)
                   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                /*
                 * With EVA we may get a TLB exception instead of an address
                 * error when the guest performs MMIO to KSeg1 addresses.
                 */
                ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
        } else {
                kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
                        store ? "ST" : "LD", cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KERNEL_MODE(vcpu)
            && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                ret = kvm_mips_bad_store(cause, opc, run, vcpu);
        } else {
                kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        int ret = RESUME_GUEST;

        if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
                ret = kvm_mips_bad_load(cause, opc, run, vcpu);
        } else {
                kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_handle_ri(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:       Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
            (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
                /*
                 * No MSA in guest, or FPU enabled and not in FR=1 mode,
                 * guest reserved instruction exception
                 */
                er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
        } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
                /* MSA disabled by guest, guest MSA disabled exception */
                er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
        } else {
                /* Restore MSA/FPU state */
                kvm_own_msa(vcpu);
                er = EMULATE_DONE;
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}

static int kvm_trap_emul_hardware_enable(void)
{
        return 0;
}

static void kvm_trap_emul_hardware_disable(void)
{
}

static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_MIPS_TE:
                r = 1;
                break;
        default:
                r = 0;
                break;
        }

        return r;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

        /*
         * Allocate GVA -> HPA page tables.
         * MIPS doesn't use the mm_struct pointer argument.
         */
        kern_mm->pgd = pgd_alloc(kern_mm);
        if (!kern_mm->pgd)
                return -ENOMEM;

        user_mm->pgd = pgd_alloc(user_mm);
        if (!user_mm->pgd) {
                pgd_free(kern_mm, kern_mm->pgd);
                return -ENOMEM;
        }

        return 0;
}

static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
        /* Don't free host kernel page tables copied from init_mm.pgd */
        const unsigned long end = 0x80000000;
        unsigned long pgd_va, pud_va, pmd_va;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;

        for (i = 0; i < USER_PTRS_PER_PGD; i++) {
                if (pgd_none(pgd[i]))
                        continue;

                pgd_va = (unsigned long)i << PGDIR_SHIFT;
                if (pgd_va >= end)
                        break;
                p4d = p4d_offset(pgd, 0);
                pud = pud_offset(p4d + i, 0);
                for (j = 0; j < PTRS_PER_PUD; j++) {
                        if (pud_none(pud[j]))
                                continue;

                        pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
                        if (pud_va >= end)
                                break;
                        pmd = pmd_offset(pud + j, 0);
                        for (k = 0; k < PTRS_PER_PMD; k++) {
                                if (pmd_none(pmd[k]))
                                        continue;

                                pmd_va = pud_va | (k << PMD_SHIFT);
                                if (pmd_va >= end)
                                        break;
                                pte = pte_offset(pmd + k, 0);
                                pte_free_kernel(NULL, pte);
                        }
                        pmd_free(NULL, pmd);
                }
                pud_free(NULL, pud);
        }
        pgd_free(NULL, pgd);
}

static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
        kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}

static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 config, config1;
        int vcpu_id = vcpu->vcpu_id;

        /* Start off the timer at 100 MHz */
        kvm_mips_init_count(vcpu, 100*1000*1000);

        /*
         * Arch specific stuff, set up config registers properly so that the
         * guest will come up as expected
         */
#ifndef CONFIG_CPU_MIPSR6
        /* r2-r5, simulate a MIPS 24kc */
        kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
        /* r6+, simulate a generic QEMU machine */
        kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
        /*
         * Have config1, Cacheable, noncoherent, write-back, write allocate.
         * Endianness, arch revision & virtually tagged icache should match
         * host.
         */
        config = read_c0_config() & MIPS_CONF_AR;
        config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
        config |= CONF_BE;
#endif
        if (cpu_has_vtag_icache)
                config |= MIPS_CONF_VI;
        kvm_write_c0_guest_config(cop0, config);

        /* Read the cache characteristics from the host Config1 Register */
        config1 = (read_c0_config1() & ~0x7f);

        /* DCache line size not correctly reported in Config1 on Octeon CPUs */
        if (cpu_dcache_line_size()) {
                config1 &= ~MIPS_CONF1_DL;
                config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
                            MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
        }

        /* Set up MMU size */
        config1 &= ~(0x3f << 25);
        config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

        /* We unset some bits that we aren't emulating */
        config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
                     MIPS_CONF1_WR | MIPS_CONF1_CA);
        kvm_write_c0_guest_config1(cop0, config1);

        /* Have config3, no tertiary/secondary caches implemented */
        kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
        /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

        /* Have config4, UserLocal */
        kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

        /* Have config5 */
        kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

        /* No config6 */
        kvm_write_c0_guest_config5(cop0, 0);

        /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
        kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

        /* Status */
        kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);

        /*
         * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
         */
        kvm_write_c0_guest_intctl(cop0, 0xFC000000);

        /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
        kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
                                       (vcpu_id & MIPS_EBASE_CPUNUM));

        /* Put PC at guest reset vector */
        vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);

        return 0;
}

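/*
 * Worked example (illustrative, not in the original file): the "MMU size"
 * field written above is Config1.MMUSize-1, bits 30:25, which encodes the
 * number of TLB entries minus one. Assuming KVM_MIPS_GUEST_TLB_SIZE is 64:
 *
 *      config1 &= ~(0x3f << 25);       clear bits 30:25
 *      config1 |= (64 - 1) << 25;      encode 63, i.e. 64 entries
 *
 * so a guest reading Config1 sees a 64-entry TLB regardless of the host's
 * actual TLB geometry.
 */
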
static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
        /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
        kvm_flush_remote_tlbs(kvm);
}

static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
                                               const struct kvm_memory_slot *slot)
{
        kvm_trap_emul_flush_shadow_all(kvm);
}

static u64 kvm_trap_emul_get_one_regs[] = {
        KVM_REG_MIPS_CP0_INDEX,
        KVM_REG_MIPS_CP0_ENTRYLO0,
        KVM_REG_MIPS_CP0_ENTRYLO1,
        KVM_REG_MIPS_CP0_CONTEXT,
        KVM_REG_MIPS_CP0_USERLOCAL,
        KVM_REG_MIPS_CP0_PAGEMASK,
        KVM_REG_MIPS_CP0_WIRED,
        KVM_REG_MIPS_CP0_HWRENA,
        KVM_REG_MIPS_CP0_BADVADDR,
        KVM_REG_MIPS_CP0_COUNT,
        KVM_REG_MIPS_CP0_ENTRYHI,
        KVM_REG_MIPS_CP0_COMPARE,
        KVM_REG_MIPS_CP0_STATUS,
        KVM_REG_MIPS_CP0_INTCTL,
        KVM_REG_MIPS_CP0_CAUSE,
        KVM_REG_MIPS_CP0_EPC,
        KVM_REG_MIPS_CP0_PRID,
        KVM_REG_MIPS_CP0_EBASE,
        KVM_REG_MIPS_CP0_CONFIG,
        KVM_REG_MIPS_CP0_CONFIG1,
        KVM_REG_MIPS_CP0_CONFIG2,
        KVM_REG_MIPS_CP0_CONFIG3,
        KVM_REG_MIPS_CP0_CONFIG4,
        KVM_REG_MIPS_CP0_CONFIG5,
        KVM_REG_MIPS_CP0_CONFIG7,
        KVM_REG_MIPS_CP0_ERROREPC,
        KVM_REG_MIPS_CP0_KSCRATCH1,
        KVM_REG_MIPS_CP0_KSCRATCH2,
        KVM_REG_MIPS_CP0_KSCRATCH3,
        KVM_REG_MIPS_CP0_KSCRATCH4,
        KVM_REG_MIPS_CP0_KSCRATCH5,
        KVM_REG_MIPS_CP0_KSCRATCH6,

        KVM_REG_MIPS_COUNT_CTL,
        KVM_REG_MIPS_COUNT_RESUME,
        KVM_REG_MIPS_COUNT_HZ,
};

static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
                                          u64 __user *indices)
{
        if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
                         sizeof(kvm_trap_emul_get_one_regs)))
                return -EFAULT;
        indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

        return 0;
}

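/*
 * Illustrative userspace sketch (not in the original file): the register
 * IDs exported above are consumed through the standard KVM one-reg API.
 * The vcpu_fd below is a hypothetical, already-created VCPU descriptor.
 *
 *      #include <linux/kvm.h>
 *
 *      __u64 val;
 *      struct kvm_one_reg reg = {
 *              .id   = KVM_REG_MIPS_CP0_STATUS,
 *              .addr = (__u64)(unsigned long)&val,
 *      };
 *      if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
 *              printf("guest CP0_Status = 0x%llx\n", val);
 *
 * KVM_GET_REG_LIST returns the same IDs that
 * kvm_trap_emul_copy_reg_indices() copies out.
 */
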
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 *v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_INDEX:
                *v = (long)kvm_read_c0_guest_index(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO0:
                *v = kvm_read_c0_guest_entrylo0(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO1:
                *v = kvm_read_c0_guest_entrylo1(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                *v = (long)kvm_read_c0_guest_context(cop0);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                *v = (long)kvm_read_c0_guest_userlocal(cop0);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                *v = (long)kvm_read_c0_guest_pagemask(cop0);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                *v = (long)kvm_read_c0_guest_wired(cop0);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                *v = (long)kvm_read_c0_guest_hwrena(cop0);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                *v = (long)kvm_read_c0_guest_badvaddr(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                *v = (long)kvm_read_c0_guest_entryhi(cop0);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                *v = (long)kvm_read_c0_guest_compare(cop0);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                *v = (long)kvm_read_c0_guest_status(cop0);
                break;
        case KVM_REG_MIPS_CP0_INTCTL:
                *v = (long)kvm_read_c0_guest_intctl(cop0);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                *v = (long)kvm_read_c0_guest_cause(cop0);
                break;
        case KVM_REG_MIPS_CP0_EPC:
                *v = (long)kvm_read_c0_guest_epc(cop0);
                break;
        case KVM_REG_MIPS_CP0_PRID:
                *v = (long)kvm_read_c0_guest_prid(cop0);
                break;
        case KVM_REG_MIPS_CP0_EBASE:
                *v = (long)kvm_read_c0_guest_ebase(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                *v = (long)kvm_read_c0_guest_config(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                *v = (long)kvm_read_c0_guest_config1(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                *v = (long)kvm_read_c0_guest_config2(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                *v = (long)kvm_read_c0_guest_config3(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                *v = (long)kvm_read_c0_guest_config4(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                *v = (long)kvm_read_c0_guest_config5(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG7:
                *v = (long)kvm_read_c0_guest_config7(cop0);
                break;
        case KVM_REG_MIPS_CP0_COUNT:
                *v = kvm_mips_read_count(vcpu);
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                *v = vcpu->arch.count_ctl;
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                *v = ktime_to_ns(vcpu->arch.count_resume);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                *v = vcpu->arch.count_hz;
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                *v = (long)kvm_read_c0_guest_errorepc(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH1:
                *v = (long)kvm_read_c0_guest_kscratch1(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH2:
                *v = (long)kvm_read_c0_guest_kscratch2(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH3:
                *v = (long)kvm_read_c0_guest_kscratch3(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH4:
                *v = (long)kvm_read_c0_guest_kscratch4(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH5:
                *v = (long)kvm_read_c0_guest_kscratch5(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH6:
                *v = (long)kvm_read_c0_guest_kscratch6(cop0);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int ret = 0;
        unsigned int cur, change;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_INDEX:
                kvm_write_c0_guest_index(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO0:
                kvm_write_c0_guest_entrylo0(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO1:
                kvm_write_c0_guest_entrylo1(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                kvm_write_c0_guest_context(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                kvm_write_c0_guest_userlocal(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                kvm_write_c0_guest_pagemask(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                kvm_write_c0_guest_wired(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                kvm_write_c0_guest_hwrena(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                kvm_write_c0_guest_badvaddr(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                kvm_write_c0_guest_entryhi(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                kvm_write_c0_guest_status(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_INTCTL:
                /* No VInt, so no VS, read-only for now */
                break;
        case KVM_REG_MIPS_CP0_EPC:
                kvm_write_c0_guest_epc(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_PRID:
                kvm_write_c0_guest_prid(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_EBASE:
                /*
                 * Allow core number to be written, but the exception base must
                 * remain in guest KSeg0.
                 */
                kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
                                          v);
                break;
        case KVM_REG_MIPS_CP0_COUNT:
                kvm_mips_write_count(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                kvm_mips_write_compare(vcpu, v, false);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                /*
                 * If the timer is stopped or started (DC bit) it must look
                 * atomic with changes to the interrupt pending bits (TI, IRQ5).
                 * A timer interrupt should not happen in between.
                 */
                if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
                        if (v & CAUSEF_DC) {
                                /* disable timer first */
                                kvm_mips_count_disable_cause(vcpu);
                                kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
                                                          v);
                        } else {
                                /* enable timer last */
                                kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
                                                          v);
                                kvm_mips_count_enable_cause(vcpu);
                        }
                } else {
                        kvm_write_c0_guest_cause(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                cur = kvm_read_c0_guest_config1(cop0);
                change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config1(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                cur = kvm_read_c0_guest_config3(cop0);
                change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config3(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                cur = kvm_read_c0_guest_config4(cop0);
                change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config4(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                cur = kvm_read_c0_guest_config5(cop0);
                change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config5(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG7:
                /* writes ignored */
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                ret = kvm_mips_set_count_ctl(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                ret = kvm_mips_set_count_resume(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                ret = kvm_mips_set_count_hz(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                kvm_write_c0_guest_errorepc(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH1:
                kvm_write_c0_guest_kscratch1(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH2:
                kvm_write_c0_guest_kscratch2(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH3:
                kvm_write_c0_guest_kscratch3(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH4:
                kvm_write_c0_guest_kscratch4(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH5:
                kvm_write_c0_guest_kscratch5(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH6:
                kvm_write_c0_guest_kscratch6(cop0, v);
                break;
        default:
                return -EINVAL;
        }
        return ret;
}

static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;

        /*
         * Were we in guest context? If so, restore the appropriate ASID based
         * on the mode of the Guest (Kernel/User).
         */
        if (current->flags & PF_VCPU) {
                mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
                check_switch_mmu_context(mm);
                kvm_mips_suspend_mm(cpu);
                ehb();
        }

        return 0;
}

static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_lose_fpu(vcpu);

        if (current->flags & PF_VCPU) {
                /* Restore normal Linux process memory map */
                check_switch_mmu_context(current->mm);
                kvm_mips_resume_mm(cpu);
                ehb();
        }

        return 0;
}

static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
                                         bool reload_asid)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;
        int i;

        if (likely(!kvm_request_pending(vcpu)))
                return;

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
                /*
                 * Both kernel & user GVA mappings must be invalidated. The
                 * caller is just about to check whether the ASID is stale
                 * anyway so no need to reload it here.
                 */
                kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
                kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
                for_each_possible_cpu(i) {
                        set_cpu_context(i, kern_mm, 0);
                        set_cpu_context(i, user_mm, 0);
                }

                /* Generate new ASID for current mode */
                if (reload_asid) {
                        mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
                        get_new_mmu_context(mm);
                        htw_stop();
                        write_c0_entryhi(cpu_asid(cpu, mm));
                        TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
                        htw_start();
                }
        }
}

/**
 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
 * @vcpu:       VCPU pointer.
 *
 * Call before a GVA space access outside of guest mode, to ensure that
 * asynchronous TLB flush requests are handled or delayed until completion of
 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
 *
 * Should be called with IRQs already enabled.
 */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
{
        /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
        WARN_ON_ONCE(irqs_disabled());

        /*
         * The caller is about to access the GVA space, so we set the mode to
         * force TLB flush requests to send an IPI, and also disable IRQs to
         * delay IPI handling until kvm_trap_emul_gva_lockless_end().
         */
        local_irq_disable();

        /*
         * Make sure the read of VCPU requests is not reordered ahead of the
         * write to vcpu->mode, or we could miss a TLB flush request while
         * the requester sees the VCPU as outside of guest mode and not needing
         * an IPI.
         */
        smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);

        /*
         * If a TLB flush has been requested (potentially while
         * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
         * before accessing the GVA space, and be sure to reload the ASID if
         * necessary as it'll be immediately used.
         *
         * TLB flush requests after this check will trigger an IPI due to the
         * mode change above, which will be delayed due to IRQs disabled.
         */
        kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
}

/**
 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
 * @vcpu:       VCPU pointer.
 *
 * Called after a GVA space access outside of guest mode. Should have a matching
 * call to kvm_trap_emul_gva_lockless_begin().
 */
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
{
        /*
         * Make sure the write to vcpu->mode is not reordered in front of GVA
         * accesses, or a TLB flush requester may not think it necessary to send
         * an IPI.
         */
        smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);

        /*
         * Now that the access to GVA space is complete, it's safe for pending
         * TLB flush request IPIs to be handled (which indicates completion).
         */
        local_irq_enable();
}

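/*
 * Usage sketch (illustrative, not in the original file): callers bracket a
 * guest-virtual-address access with the begin/end pair so that a concurrent
 * TLB flush either completes first or is deferred until the access is done.
 * The get_user() of the guest opcode here is a hypothetical example access.
 *
 *      kvm_trap_emul_gva_lockless_begin(vcpu);
 *      err = get_user(inst, (u32 __user *)vcpu->arch.pc);
 *      kvm_trap_emul_gva_lockless_end(vcpu);
 */
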
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int i, cpu = smp_processor_id();
        unsigned int gasid;

        /*
         * No need to reload ASID, IRQs are disabled already so there's no rush,
         * and we'll check if we need to regenerate below anyway before
         * re-entering the guest.
         */
        kvm_trap_emul_check_requests(vcpu, cpu, false);

        if (KVM_GUEST_KERNEL_MODE(vcpu)) {
                mm = kern_mm;
        } else {
                mm = user_mm;

                /*
                 * Lazy host ASID regeneration / PT flush for guest user mode.
                 * If the guest ASID has changed since the last guest usermode
                 * execution, invalidate the stale TLB entries and flush GVA PT
                 * entries too.
                 */
                gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
                if (gasid != vcpu->arch.last_user_gasid) {
                        kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
                        for_each_possible_cpu(i)
                                set_cpu_context(i, user_mm, 0);
                        vcpu->arch.last_user_gasid = gasid;
                }
        }

        /*
         * Check if ASID is stale. This may happen due to a TLB flush request or
         * a lazy user MM invalidation.
         */
        check_mmu_context(mm);
}

static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        int cpu = smp_processor_id();
        int r;

        /* Check if we have any exceptions/interrupts pending */
        kvm_mips_deliver_interrupts(vcpu,
                                    kvm_read_c0_guest_cause(vcpu->arch.cop0));

        kvm_trap_emul_vcpu_reenter(run, vcpu);

        /*
         * We use user accessors to access guest memory, but we don't want to
         * invoke Linux page faulting.
         */
        pagefault_disable();

        /* Disable hardware page table walking while in guest */
        htw_stop();

        /*
         * While in guest context we're in the guest's address space, not the
         * host process address space, so we need to be careful not to confuse
         * e.g. cache management IPIs.
         */
        kvm_mips_suspend_mm(cpu);

        r = vcpu->arch.vcpu_run(run, vcpu);

        /* We may have migrated while handling guest exits */
        cpu = smp_processor_id();

        /* Restore normal Linux process memory map */
        check_switch_mmu_context(current->mm);
        kvm_mips_resume_mm(cpu);

        htw_start();

        pagefault_enable();

        return r;
}

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        /* exit handlers */
        .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
        .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
        .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
        .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
        .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
        .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
        .handle_syscall = kvm_trap_emul_handle_syscall,
        .handle_res_inst = kvm_trap_emul_handle_res_inst,
        .handle_break = kvm_trap_emul_handle_break,
        .handle_trap = kvm_trap_emul_handle_trap,
        .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
        .handle_fpe = kvm_trap_emul_handle_fpe,
        .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
        .handle_guest_exit = kvm_trap_emul_no_handler,

        .hardware_enable = kvm_trap_emul_hardware_enable,
        .hardware_disable = kvm_trap_emul_hardware_disable,
        .check_extension = kvm_trap_emul_check_extension,
        .vcpu_init = kvm_trap_emul_vcpu_init,
        .vcpu_uninit = kvm_trap_emul_vcpu_uninit,
        .vcpu_setup = kvm_trap_emul_vcpu_setup,
        .flush_shadow_all = kvm_trap_emul_flush_shadow_all,
        .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
        .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
        .queue_timer_int = kvm_mips_queue_timer_int_cb,
        .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
        .queue_io_int = kvm_mips_queue_io_int_cb,
        .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
        .irq_deliver = kvm_mips_irq_deliver_cb,
        .irq_clear = kvm_mips_irq_clear_cb,
        .num_regs = kvm_trap_emul_num_regs,
        .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
        .get_one_reg = kvm_trap_emul_get_one_reg,
        .set_one_reg = kvm_trap_emul_set_one_reg,
        .vcpu_load = kvm_trap_emul_vcpu_load,
        .vcpu_put = kvm_trap_emul_vcpu_put,
        .vcpu_run = kvm_trap_emul_vcpu_run,
        .vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
        *install_callbacks = &kvm_trap_emul_callbacks;
        return 0;
}
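
/*
 * Illustrative sketch (not in this file): the MIPS arch code installs these
 * callbacks at initialization and dispatches through them, roughly along
 * these lines; the exact call site in arch/mips/kvm/mips.c is an assumption.
 *
 *      struct kvm_mips_callbacks *kvm_mips_callbacks;
 *
 *      ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
 *      if (ret)
 *              return ret;
 *      ...
 *      kvm_mips_callbacks->vcpu_setup(vcpu);   dispatched per implementation
 */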