Linux 5.7.6 — [linux/fpc-iii.git] / arch/mips/kvm/mips.c
blob 8f05dd0a0f4ec6690c836c27a8b98e47dff828aa
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait",		VCPU_STAT(wait_exits),		KVM_STAT_VCPU },
	{ "cache",		VCPU_STAT(cache_exits),		KVM_STAT_VCPU },
	{ "signal",		VCPU_STAT(signal_exits),	KVM_STAT_VCPU },
	{ "interrupt",		VCPU_STAT(int_exits),		KVM_STAT_VCPU },
	{ "cop_unusable",	VCPU_STAT(cop_unusable_exits),	KVM_STAT_VCPU },
	{ "tlbmod",		VCPU_STAT(tlbmod_exits),	KVM_STAT_VCPU },
	{ "tlbmiss_ld",		VCPU_STAT(tlbmiss_ld_exits),	KVM_STAT_VCPU },
	{ "tlbmiss_st",		VCPU_STAT(tlbmiss_st_exits),	KVM_STAT_VCPU },
	{ "addrerr_st",		VCPU_STAT(addrerr_st_exits),	KVM_STAT_VCPU },
	{ "addrerr_ld",		VCPU_STAT(addrerr_ld_exits),	KVM_STAT_VCPU },
	{ "syscall",		VCPU_STAT(syscall_exits),	KVM_STAT_VCPU },
	{ "resvd_inst",		VCPU_STAT(resvd_inst_exits),	KVM_STAT_VCPU },
	{ "break_inst",		VCPU_STAT(break_inst_exits),	KVM_STAT_VCPU },
	{ "trap_inst",		VCPU_STAT(trap_inst_exits),	KVM_STAT_VCPU },
	{ "msa_fpe",		VCPU_STAT(msa_fpe_exits),	KVM_STAT_VCPU },
	{ "fpe",		VCPU_STAT(fpe_exits),		KVM_STAT_VCPU },
	{ "msa_disabled",	VCPU_STAT(msa_disabled_exits),	KVM_STAT_VCPU },
	{ "flush_dcache",	VCPU_STAT(flush_dcache_exits),	KVM_STAT_VCPU },
#ifdef CONFIG_KVM_MIPS_VZ
	{ "vz_gpsi",		VCPU_STAT(vz_gpsi_exits),	KVM_STAT_VCPU },
	{ "vz_gsfc",		VCPU_STAT(vz_gsfc_exits),	KVM_STAT_VCPU },
	{ "vz_hc",		VCPU_STAT(vz_hc_exits),		KVM_STAT_VCPU },
	{ "vz_grr",		VCPU_STAT(vz_grr_exits),	KVM_STAT_VCPU },
	{ "vz_gva",		VCPU_STAT(vz_gva_exits),	KVM_STAT_VCPU },
	{ "vz_ghfc",		VCPU_STAT(vz_ghfc_exits),	KVM_STAT_VCPU },
	{ "vz_gpa",		VCPU_STAT(vz_gpa_exits),	KVM_STAT_VCPU },
	{ "vz_resvd",		VCPU_STAT(vz_resvd_exits),	KVM_STAT_VCPU },
#endif
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
	{ "halt_poll_invalid",	VCPU_STAT(halt_poll_invalid),	KVM_STAT_VCPU },
	{ "halt_wakeup",	VCPU_STAT(halt_wakeup),		KVM_STAT_VCPU },
	{NULL}
};
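/*
 * Each entry above pairs a debugfs file name with the offsetof() of a
 * counter in struct kvm_vcpu (via VCPU_STAT); the generic KVM core walks
 * this NULL-terminated table to create the statistics files under debugfs.
 */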
bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = 1;
	return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = 0;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void)
{
	return kvm_mips_callbacks->hardware_enable();
}

void kvm_arch_hardware_disable(void)
{
	kvm_mips_callbacks->hardware_disable();
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
#ifdef CONFIG_KVM_MIPS_VZ
	case KVM_VM_MIPS_VZ:
#else
	case KVM_VM_MIPS_TE:
#endif
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	}

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
		return -ENOMEM;

	return 0;
}
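/*
 * The gpa_mm.pgd allocated above is a host-format page table mapping guest
 * physical addresses to root/host physical addresses; it is flushed and
 * freed again via kvm_mips_free_gpa_pt() in kvm_arch_destroy_vm() below.
 */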
void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_vcpu_destroy(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
	/* It should always be safe to remove after flushing the whole range */
	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);
	kvm_mips_free_gpa_pt(kvm);
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	/* Flush whole GPA */
	kvm_mips_flush_gpa_pt(kvm, 0, ~0);

	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_all(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest VCPUs.
	 */

	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int needs_flush;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/*
	 * If dirty page logging is enabled, write protect all pages in the slot
	 * ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE:	No dirty mappings will already exist.
	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages - 1);
		/* Let implementation do the rest */
		if (needs_flush)
			kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
	}
}
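/*
 * Dirty logging works by making the GPA page table entries for the slot
 * clean (write-protected): the first subsequent guest write to a page
 * faults, and the fault path marks that page dirty for userspace to
 * harvest. This comment summarises the mechanism behind
 * kvm_mips_mkclean_gpa_pt() above.
 */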
static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (swq_has_sleeper(&vcpu->wq))
		swake_up_one(&vcpu->wq);

	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err, size;
	void *gebase, *p, *handler, *refill_start, *refill_end;
	int i;

	kvm_debug("kvm @ %p: create cpu %d at %p\n",
		  vcpu->kvm, vcpu->vcpu_id, vcpu);

	err = kvm_mips_callbacks->vcpu_init(vcpu);
	if (err)
		return err;

	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_vcpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
			gebase);
		err = -ENOMEM;
		goto out_free_gebase;
	}

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}

	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");
	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_tlb_refill", refill_start, refill_end);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;
	vcpu->arch.last_exec_cpu = -1;

	/* Initial guest state */
	err = kvm_mips_callbacks->vcpu_setup(vcpu);
	if (err)
		goto out_free_commpage;

	return 0;

out_free_commpage:
	kfree(vcpu->arch.kseg0_commpage);
out_free_gebase:
	kfree(gebase);
out_uninit_vcpu:
	kvm_mips_callbacks->vcpu_uninit(vcpu);
	return err;
}
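/*
 * Resulting layout of the gebase region built above (a summary of the code,
 * not an authoritative map):
 *
 *	gebase + 0x000	TLB refill handler (+0x080 for XTLB on 64-bit VZ)
 *	gebase + 0x180	general exception entry point
 *	gebase + 0x200	vectored interrupt entries, VECTORSPACING apart
 *	gebase + 0x2000	general exit handler, followed by the vcpu_run routine
 */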
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_mips_dump_stats(vcpu);

	kvm_mmu_free_memory_caches(vcpu);
	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);

	kvm_mips_callbacks->vcpu_uninit(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	if (run->immediate_exit)
		goto out;

	lose_fpu(1);

	local_irq_disable();
	guest_enter_irqoff();
	trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of VCPU requests in vcpu_run() callback is not
	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
	 * flush request while the requester sees the VCPU as outside of guest
	 * mode and not needing an IPI.
	 */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	r = kvm_mips_callbacks->vcpu_run(run, vcpu);

	trace_kvm_out(vcpu);
	guest_exit_irqoff();
	local_irq_enable();

out:
	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (swq_has_sleeper(&dvcpu->wq))
		swake_up_one(&dvcpu->wq);

	return 0;
}
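/*
 * Userspace raises and lowers these interrupt lines with the KVM_INTERRUPT
 * vcpu ioctl (dispatched via kvm_arch_vcpu_async_ioctl() below); a positive
 * irq number queues the interrupt and its negative deasserts it. A minimal
 * userspace sketch, assuming vcpu_fd is an open vcpu file descriptor and
 * omitting error handling:
 *
 *	struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 2 };
 *
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// assert IRQ 2 on this vcpu
 *	irq.irq = -2;
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// deassert it again
 */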
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,
};

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};
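/*
 * Each index above is a KVM_REG_MIPS_* constant for the ONE_REG interface:
 * the u64 id encodes the architecture, the access size (KVM_REG_SIZE_U32,
 * U64 or U128, checked in kvm_mips_get_reg()/kvm_mips_set_reg() below) and
 * the register number itself.
 */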
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}

static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}
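/*
 * These helpers back the KVM_GET_REG_LIST vcpu ioctl (see
 * kvm_arch_vcpu_ioctl() below). Userspace typically calls the ioctl twice:
 * once to learn the count, then again with a large enough buffer. A minimal
 * sketch, assuming vcpu_fd is an open vcpu file descriptor and omitting
 * error handling:
 *
 *	struct kvm_reg_list head = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &head);  // fails with E2BIG,
 *						  // head.n now holds the count
 *	list = malloc(sizeof(*list) + head.n * sizeof(__u64));
 *	list->n = head.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);	  // fills list->reg[]
 */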
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* registers to be handled specially */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		/*
		 * Copy the 128-bit value in for the switch below; returning
		 * success here would skip the actual register update.
		 */
		if (copy_from_user(vs, uaddr, 16))
			return -EFAULT;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* registers to be handled specially */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}
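/*
 * The implementation registers (KVM_REG_MIPS_FCR_IR, KVM_REG_MIPS_MSA_IR)
 * accept writes above but ignore them: they reflect host FPU/MSA
 * implementation bits, so they are effectively read-only, presumably
 * returning success so that blind save/restore sequences in userspace do
 * not fail.
 */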
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
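/*
 * A minimal sketch of enabling the guest FPU from userspace, assuming
 * vcpu_fd is an open vcpu file descriptor and KVM_CHECK_EXTENSION has
 * already reported KVM_CAP_MIPS_FPU as available:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */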
long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
			       unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
			  irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_mips_set_reg(vcpu, &reg);
		else
			r = kvm_mips_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}

	vcpu_put(vcpu);
	return r;
}
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	/* Let implementation handle TLB/GVA invalidation */
	kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
}

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling the
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}
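/*
 * ST0_MX gates access to the DSP ASE state, so it is turned on above
 * whenever the host has DSP; the ehb() is an execution hazard barrier,
 * ensuring the new Status value takes effect before any dependent
 * instruction executes.
 */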
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	u32 inst;
	int ret = RESUME_GUEST;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* re-enable HTW before enabling interrupts */
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
		/*
		 * Do a privilege check, if in UM most of these exit conditions
		 * end up causing an exception to be delivered to the Guest
		 * Kernel
		 */
		er = kvm_mips_check_privilege(cause, opc, run, vcpu);
		if (er == EMULATE_PRIV_FAIL) {
			goto skip_emul;
		} else if (er == EMULATE_FAIL) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
			goto skip_emul;
		}
	}

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;

	default:
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	}

skip_emul:
	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of VCPU requests in vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester sees
		 * the VCPU as outside of guest mode and not needing an IPI.
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		kvm_mips_callbacks->vcpu_reenter(run, vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}

	/* Disable HTW before returning to guest or host */
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_stop();

	return ret;
}
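/*
 * The RESUME_* value returned above is consumed by the generated entry code
 * and, ultimately, by kvm_arch_vcpu_ioctl_run(): RESUME_GUEST re-enters the
 * guest, while any value with RESUME_FLAG_HOST set unwinds to userspace,
 * with a negative errno packed in the upper bits as (-errno << 2), as the
 * (-EINTR << 2) signal case above illustrates. (Summary of the existing
 * convention, per the header comment of this function.)
 */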
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
	 * get called when guest CU1 is set, however we can't trust the guest
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}
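/*
 * aux_inuse tracks which pieces of guest auxiliary context (FPU and/or MSA
 * register state) are currently live in the hardware: kvm_own_fpu() and
 * kvm_own_msa() restore state lazily on first guest use, while
 * kvm_lose_fpu() saves it away again before the host might clobber it.
 */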
#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU nor MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif
/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
	 * is disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_config5(MIPS_CONF5_MSAEN);
			enable_fpu_hazard();
		}

		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_status(ST0_CU1);
			enable_fpu_hazard();
		}

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}
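/*
 * Note the split above: kvm_drop_fpu() discards the hardware FPU/MSA state
 * without saving (for when the guest context is about to be overwritten
 * anyway), while kvm_lose_fpu() writes it back into vcpu->arch before
 * disabling, so it can be restored lazily by kvm_own_fpu()/kvm_own_msa().
 */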
/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

static int __init kvm_mips_init(void)
{
	int ret;

	if (cpu_has_mmid) {
		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
		return -EOPNOTSUPP;
	}

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);