 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>

#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

#include <linux/kvm_host.h>

#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"

#define CREATE_TRACE_POINTS
#define VECTORSPACING 0x100	/* for EI/VI mode */

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
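/*
 * Per-vcpu exit counters exposed through debugfs. Each entry maps a
 * debugfs file name to the offset of a counter in struct kvm_vcpu, so
 * generic KVM code can report how often each exit path was taken.
 */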
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait", VCPU_STAT(wait_exits) },
	{ "cache", VCPU_STAT(cache_exits) },
	{ "signal", VCPU_STAT(signal_exits) },
	{ "interrupt", VCPU_STAT(int_exits) },
	{ "cop_unusable", VCPU_STAT(cop_unusable_exits) },
	{ "tlbmod", VCPU_STAT(tlbmod_exits) },
	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
	{ "addrerr_st", VCPU_STAT(addrerr_st_exits) },
	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
	{ "syscall", VCPU_STAT(syscall_exits) },
	{ "resvd_inst", VCPU_STAT(resvd_inst_exits) },
	{ "break_inst", VCPU_STAT(break_inst_exits) },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
 * are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
	return !!(vcpu->arch.pending_exceptions);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)

int kvm_arch_hardware_enable(void *garbage)

void kvm_arch_hardware_disable(void *garbage)

int kvm_arch_hardware_setup(void)

void kvm_arch_hardware_unsetup(void)

void kvm_arch_check_processor_compat(void *rtn)
static void kvm_mips_init_tlbs(struct kvm *kvm)
	/* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);

	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
static void kvm_mips_init_vm_percpu(void *arg)
	struct kvm *kvm = (struct kvm *)arg;

	kvm_mips_init_tlbs(kvm);
	kvm_mips_callbacks->vm_init(kvm);
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
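/*
 * kvm_mips_instance counts live VMs: the first VM to come up reserves the
 * host TLB resources (the wired commpage entry) on every CPU, and the last
 * VM to go away restores them again in kvm_arch_destroy_vm() below.
 */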
void kvm_mips_free_vcpus(struct kvm *kvm)
	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);

	if (kvm->arch.guest_pmap)
		kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
void kvm_arch_sync_events(struct kvm *kvm)

static void kvm_mips_uninit_tlbs(void *arg)
	/* Restore wired count */

	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();
void kvm_arch_destroy_vm(struct kvm *kvm)
	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_info("%s: last KVM instance, restoring TLB parameters\n",
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)

void kvm_arch_memslots_updated(struct kvm *kvm)

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
	unsigned long npages = 0;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/* Setup Guest PMAP table */
	if (!kvm->arch.guest_pmap) {
		npages = mem->memory_size >> PAGE_SHIFT;

		kvm->arch.guest_pmap_npages = npages;
		kvm->arch.guest_pmap =
		    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

		if (!kvm->arch.guest_pmap) {
			kvm_err("Failed to allocate guest PMAP");

		    ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
		     npages, kvm->arch.guest_pmap);

		/* Now setup the page table */
		for (i = 0; i < npages; i++) {
			kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
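	/*
	 * guest_pmap holds one host pfn per guest physical page and starts
	 * out fully invalid; entries get filled in elsewhere by the guest
	 * mapping/fault paths and are released again in kvm_mips_free_vcpus()
	 * above.
	 */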
void kvm_arch_flush_shadow_all(struct kvm *kvm)

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)

void kvm_arch_flush_shadow(struct kvm *kvm)
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
	extern char mips32_exception[], mips32_exceptionEnd[];
	extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
	int err, size, offset;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	err = kvm_vcpu_init(vcpu, kvm, id);

	kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
	/* Allocate space for host mode exception handlers that handle
	if (cpu_has_veic || cpu_has_vint) {
		size = 0x200 + VECTORSPACING * 64;

	/* Save Linux EBASE */
	vcpu->arch.host_ebase = (void *)read_c0_ebase();

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		 ALIGN(size, PAGE_SIZE), gebase);

	vcpu->arch.guest_ebase = gebase;

	/* Copy L1 Guest Exception handler to correct offset */

	/* TLB Refill, EXL = 0 */
	memcpy(gebase, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* General Exception Entry point */
	memcpy(gebase + 0x180, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));

		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
		       mips32_exceptionEnd - mips32_exception);

	/* General handler, relocate to unmapped space for sanity's sake */
	kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
		 mips32_GuestExceptionEnd - mips32_GuestException);

	memcpy(gebase + offset, mips32_GuestException,
	       mips32_GuestExceptionEnd - mips32_GuestException);

	/* Invalidate the icache for these ranges */
	mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
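	/*
	 * Layout of the handler area pointed to by gebase, mirroring the
	 * EBASE-relative vector map set up above:
	 *   0x000                       TLB refill (EXL = 0)
	 *   0x180                       general exception entry
	 *   0x200 + i * VECTORSPACING   vectored interrupt i (0..7), EI/VI mode
	 * The main guest exception handler is then copied in at gebase + offset
	 * and the icache is flushed so the new code is visible.
	 */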
	/* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		goto out_free_gebase;

	kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_emulate_count(vcpu);
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	if (vcpu->arch.guest_ebase)
		kfree(vcpu->arch.guest_ebase);

	if (vcpu->arch.kseg0_commpage)
		kfree(vcpu->arch.kseg0_commpage);

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
	kvm_arch_vcpu_free(vcpu);
kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
				    struct kvm_guest_debug *dbg)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	r = __kvm_mips_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
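/*
 * Run path in a nutshell: swap in the vcpu's signal mask, finish any MMIO
 * load the previous exit left pending, push pending guest interrupts into
 * CP0 state, then drop into the low-level guest entry path
 * (__kvm_mips_vcpu_run) until something forces a return to userspace.
 */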
kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,

		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);

		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,

	dvcpu->arch.wait = 0;

	if (waitqueue_active(&dvcpu->wq)) {
		wake_up_interruptible(&dvcpu->wq);
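/*
 * Convention for the interrupt ioctl: a positive irq number (2, 3 or 4)
 * queues the corresponding I/O interrupt on the target vcpu, the negated
 * value dequeues it again, and irq->cpu selects which vcpu is targeted.
 * The target is also kicked out of any wait state so it notices the change
 * promptly.
 */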
kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)

kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
#define MIPS_CP0_32(_R, _S)	\
	(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)	\
	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
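/*
 * Each CP0 register is identified to the ONE_REG API by 8 * reg + sel in
 * the low bits, with 0x10000 marking the CP0 block and KVM_REG_SIZE_U32/U64
 * giving the access width. For example, KVM_REG_MIPS_CP0_STATUS is
 * MIPS_CP0_32(12, 0), i.e. KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | 96.
 *
 * Illustrative userspace read of the guest Status register (sketch only,
 * not part of this file; vcpu_fd and status_val are placeholders):
 *
 *	__u32 status_val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_CP0_STATUS,
 *		.addr = (__u64)(unsigned long)&status_val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */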
static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	/* EPC set via kvm_regs, et al. */
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];

	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;

	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;

	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;

	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);

	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);

	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);

	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);

	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);

	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);

	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);

	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);

	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);

	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);

	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);

	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);

	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);

	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;

		return put_user(v32, uaddr32);
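	/*
	 * The size field of reg->id decides how the value is copied out:
	 * 64-bit registers are written back whole, 32-bit registers as a
	 * 32-bit value. The set path below mirrors this when reading the
	 * new value in from userspace.
	 */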
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;

		if (get_user(v32, uaddr32) != 0)

	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */

	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;

	case KVM_REG_MIPS_HI:

	case KVM_REG_MIPS_LO:

	case KVM_REG_MIPS_PC:

	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);

	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);

	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);

	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);

	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);

	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);

	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);

	case KVM_REG_MIPS_CP0_CAUSE:
		kvm_write_c0_guest_cause(cop0, v);

	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))

		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);

		return kvm_mips_get_reg(vcpu, &reg);

	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		u64 __user *reg_dest;
		struct kvm_reg_list reg_list;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))

		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))

		reg_dest = user_list->reg;
		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
				 sizeof(kvm_mips_get_one_regs)))

		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);

		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))

		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,

		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
 * Get (and clear) the dirty memory log for a memory slot.
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);

	/* If nothing is dirty, don't bother messing with page tables. */
		memslot = &kvm->memslots->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);

	mutex_unlock(&kvm->slots_lock);
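/*
 * Dirty logging here piggybacks on the generic kvm_get_dirty_log(): once a
 * pass has been reported, the slot's guest-physical range is noted and the
 * dirty bitmap is cleared so the next KVM_GET_DIRTY_LOG call starts from a
 * clean slate.
 */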
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)

int kvm_arch_init(void *opaque)
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");

	ret = kvm_mips_emulation_init(&kvm_mips_callbacks);

void kvm_arch_exit(void)
	kvm_mips_callbacks = NULL;
kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)

kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
	return VM_FAULT_SIGBUS;

int kvm_dev_ioctl_check_extension(long ext)
	case KVM_CAP_ONE_REG:

	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
	return kvm_mips_pending_timer(vcpu);
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0;

	printk("VCPU Register Dump:\n");
	printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
	printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gprs[i + 1],
		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);

	printk("\thi: 0x%08lx\n", vcpu->arch.hi);
	printk("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
	       kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));

	printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;
void kvm_mips_comparecount_func(unsigned long data)
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
 * low level hrtimer wake routine.
enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	hrtimer_forward_now(&vcpu->arch.comparecount_timer,
			    ktime_set(0, MS_TO_NS(10)));
	return HRTIMER_RESTART;
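/*
 * The compare/count emulation is driven by this hrtimer: every expiry
 * queues a guest timer interrupt, wakes the vcpu if it is sitting in WAIT,
 * and re-arms itself 10 ms in the future by returning HRTIMER_RESTART
 * after hrtimer_forward_now().
 */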
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)

kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
	return kvm_mips_callbacks->vcpu_setup(vcpu);
void kvm_mips_set_c0_status(void)
	uint32_t status = read_c0_status();

		status |= (ST0_CU1);

	write_c0_status(status);
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
	uint32_t cause = vcpu->arch.host_cp0_cause;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
	kvm_mips_set_c0_status();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);

	/* Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;

		kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;
		trace_kvm_exit(vcpu, INT_EXITS);

		if (need_resched()) {

	case T_COP_UNUSABLE:
		kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {

		++vcpu->stat.tlbmod_exits;
		trace_kvm_exit(vcpu, TLBMOD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);

		    ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
		     cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,

		++vcpu->stat.tlbmiss_st_exits;
		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);

		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);

		++vcpu->stat.addrerr_st_exits;
		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);

		++vcpu->stat.addrerr_ld_exits;
		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);

		++vcpu->stat.syscall_exits;
		trace_kvm_exit(vcpu, SYSCALL_EXITS);
		ret = kvm_mips_callbacks->handle_syscall(vcpu);

		++vcpu->stat.resvd_inst_exits;
		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);

		++vcpu->stat.break_inst_exits;
		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
		ret = kvm_mips_callbacks->handle_break(vcpu);

		    ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
		     exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
		     kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;

	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, SIGNAL_EXITS);
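/*
 * Everything above funnels into the return convention noted at the top of
 * the function: staying in guest mode returns RESUME_GUEST, while a trip
 * back to userspace encodes an error in the upper bits, e.g. a pending
 * signal produces (-EINTR << 2) | RESUME_HOST with exit_reason set to
 * KVM_EXIT_INTR.
 */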
int __init kvm_mips_init(void)
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	/* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
	 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
	 * to avoid the possibility of double faulting. The issue is that the TLB code
	 * references routines that are part of the KVM module,
	 * which are only available once the module is loaded.
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_is_error_pfn = is_error_pfn;

	pr_info("KVM/MIPS Initialized\n");
void __exit kvm_mips_exit(void)
	kvm_mips_gfn_to_pfn = NULL;
	kvm_mips_release_pfn_clean = NULL;
	kvm_mips_is_error_pfn = NULL;

	pr_info("KVM/MIPS unloaded\n");

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);