/*
 * kvm_ia64.c: Basic KVM support on Itanium series processors
 *
 * Copyright (C) 2007, Intel Corporation.
 *   Xiantao Zhang  (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/bitops.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

#include <asm/pgtable.h>
#include <asm/gcc_intrin.h>
#include <asm/pal.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/tlb.h>
#include <asm/elf.h>
#include <asm/sn/addrs.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/shub_mmr.h>
static unsigned long kvm_vmm_base;
static unsigned long kvm_vsa_base;
static unsigned long kvm_vm_buffer;
static unsigned long kvm_vm_buffer_size;
unsigned long kvm_vmm_gp;

static long vp_env_info;

static struct kvm_vmm_info *kvm_vmm_info;

static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ NULL }
};
static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	/* SN2 has an unstable ITC; use the chipset RTC instead. */
	if (vcpu->kvm->arch.is_sn2)
		return rtc_time();
	else
#endif
		return ia64_getreg(_IA64_REG_AR_ITC);
}
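/*
 * Flush the instruction cache for the given range with fc at 32-byte
 * (cache line) granularity, then serialize.
 */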
static void kvm_flush_icache(unsigned long start, unsigned long len)
{
	int l;

	for (l = 0; l < (len + 32); l += 32)
		ia64_fc((void *)(start + l));

	ia64_sync_i();
	ia64_srlz_i();
}
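/*
 * Purge the entire local TLB with ptc.e, walking the purge loop
 * described by the PAL-provided base/count/stride parameters.
 */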
static void kvm_flush_tlb_all(void)
{
	unsigned long i, j, count0, count1, stride0, stride1, addr;
	long flags;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();	/* srlz.i implies srlz.d */
}
long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
{
	struct ia64_pal_retval iprv;

	PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
			(u64)opt_handler);

	return iprv.status;
}
static DEFINE_SPINLOCK(vp_lock);
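/*
 * Per-CPU enable: temporarily pin the VMM area with a translation
 * register, then ask PAL to initialize (or join) the VT-i environment
 * on this processor.
 */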
int kvm_arch_hardware_enable(void *garbage)
{
	long status;
	long tmp_base;
	unsigned long pte;
	unsigned long saved_psr;
	int slot;

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return -EINVAL;

	spin_lock(&vp_lock);
	status = ia64_pal_vp_init_env(kvm_vsa_base ?
				VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
	if (status != 0) {
		spin_unlock(&vp_lock);
		printk(KERN_WARNING "kvm: Failed to Enable VT Support!!!!\n");
		return -EINVAL;
	}

	if (!kvm_vsa_base) {
		kvm_vsa_base = tmp_base;
		printk(KERN_INFO "kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
	}
	spin_unlock(&vp_lock);
	ia64_ptr_entry(0x3, slot);

	return 0;
}
void kvm_arch_hardware_disable(void *garbage)
{
	long status;
	int slot;
	unsigned long pte;
	unsigned long saved_psr;
	unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
				PAGE_KERNEL));

	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	status = ia64_pal_vp_exit_env(host_iva);
	if (status)
		printk(KERN_DEBUG "kvm: Failed to disable VT support! :%ld\n",
				status);
	ia64_ptr_entry(0x3, slot);
}
void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IRQ_INJECT_STATUS:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_found();
		break;
	default:
		r = 0;
	}
	return r;
}
static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	kvm_run->hw.hardware_exit_reason = 1;
	return 0;
}
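/*
 * Complete an MMIO access trapped by the VMM: accesses that hit the
 * in-kernel IOSAPIC page are handled on the kvm MMIO bus here; anything
 * else is forwarded to userspace as a KVM_EXIT_MMIO.
 */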
static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct kvm_mmio_req *p;
	int r;

	p = kvm_get_vcpu_ioreq(vcpu);

	if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
		goto mmio;
	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
	vcpu->mmio_size = kvm_run->mmio.len = p->size;
	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;

	if (vcpu->mmio_is_write)
		memcpy(vcpu->mmio_data, &p->data, p->size);
	memcpy(kvm_run->mmio.data, &p->data, p->size);
	kvm_run->exit_reason = KVM_EXIT_MMIO;
	return 0;
mmio:
	if (p->dir)
		r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
				    p->size, &p->data);
	else
		r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
				     p->size, &p->data);
	if (r)
		printk(KERN_ERR "kvm: No iodevice found! addr:%lx\n", p->addr);
	p->state = STATE_IORESP_READY;

	return 1;
}
static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_PAL_CALL)
		return kvm_pal_emul(vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 2;
		return 0;
	}
}
static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
		kvm_sal_emul(vcpu);
		return 1;
	} else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 3;
		return 0;
	}
}
static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (!test_and_set_bit(vector, &vpd->irr[0])) {
		vcpu->arch.irq_new_pending = 1;
		kvm_vcpu_kick(vcpu);
		return 1;
	}
	return 0;
}
/*
 *  offset: address offset to IPI space.
 *  value:  deliver value.
 */
static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
				uint64_t vector)
{
	switch (dm) {
	case SAPIC_FIXED:
		break;
	case SAPIC_NMI:
		vector = 2;
		break;
	case SAPIC_EXTINT:
		vector = 0;
		break;
	case SAPIC_INIT:
	case SAPIC_PMI:
	default:
		printk(KERN_ERR "kvm: Unimplemented Deliver reserved IPI!\n");
		return;
	}
	__apic_accept_irq(vcpu, vector);
}
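/* Look up the vcpu whose LID (id/eid pair) matches the IPI target. */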
static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
				unsigned long eid)
{
	union ia64_lid lid;
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		lid.val = VCPU_LID(vcpu);
		if (lid.id == id && lid.eid == eid)
			return vcpu;
	}

	return NULL;
}
static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm_vcpu *target_vcpu;
	struct kvm_pt_regs *regs;
	union ia64_ipi_a addr = p->u.ipi_data.addr;
	union ia64_ipi_d data = p->u.ipi_data.data;

	target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
	if (!target_vcpu)
		return handle_vm_error(vcpu, kvm_run);

	if (!target_vcpu->arch.launched) {
		regs = vcpu_regs(target_vcpu);

		regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
		regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;

		target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		if (waitqueue_active(&target_vcpu->wq))
			wake_up_interruptible(&target_vcpu->wq);
	} else {
		vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
		if (target_vcpu != vcpu)
			kvm_vcpu_kick(target_vcpu);
	}

	return 1;
}
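/*
 * Argument block for vcpu_global_purge(), which runs via
 * smp_call_function_single() on the CPU where the target vcpu last ran
 * and queues the ptc.g request in that vcpu, falling back to a full TLB
 * flush once the queue overflows.
 */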
struct call_data {
	struct kvm_ptc_g ptc_g_data;
	struct kvm_vcpu *vcpu;
};
static void vcpu_global_purge(void *info)
{
	struct call_data *p = (struct call_data *)info;
	struct kvm_vcpu *vcpu = p->vcpu;

	if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
		return;

	set_bit(KVM_REQ_PTC_G, &vcpu->requests);
	if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
		vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
							p->ptc_g_data;
	} else {
		clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
		vcpu->arch.ptc_g_count = 0;
		set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
	}
}
static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm *kvm = vcpu->kvm;
	struct call_data call_data;
	int i;
	struct kvm_vcpu *vcpui;

	call_data.ptc_g_data = p->u.ptc_g_data;

	kvm_for_each_vcpu(i, vcpui, kvm) {
		if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
				vcpu == vcpui)
			continue;

		if (waitqueue_active(&vcpui->wq))
			wake_up_interruptible(&vcpui->wq);

		if (vcpui->cpu != -1) {
			call_data.vcpu = vcpui;
			smp_call_function_single(vcpui->cpu,
					vcpu_global_purge, &call_data, 1);
		} else
			printk(KERN_WARNING "kvm: Uninit vcpu received ipi!\n");
	}
	return 1;
}
static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}
static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
{
	unsigned long pte, rtc_phys_addr, map_addr;
	int slot;

	map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
	rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
	pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
	slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
	vcpu->arch.sn_rtc_tr_slot = slot;
	if (slot < 0) {
		printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
		slot = 0;
	}
	return slot;
}
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	ktime_t kt;
	long itc_diff;
	unsigned long vcpu_now_itc;
	unsigned long expires;
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (irqchip_in_kernel(vcpu->kvm)) {

		vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;

		if (time_after(vcpu_now_itc, vpd->itm)) {
			vcpu->arch.timer_check = 1;
			return 1;
		}
		itc_diff = vpd->itm - vcpu_now_itc;
		if (itc_diff < 0)
			itc_diff = -itc_diff;

		expires = div64_u64(itc_diff, cyc_per_usec);
		kt = ktime_set(0, 1000 * expires);

		vcpu->arch.ht_active = 1;
		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		hrtimer_cancel(p_ht);
		vcpu->arch.ht_active = 0;

		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
				kvm_cpu_has_pending_timer(vcpu))
			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
				vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		printk(KERN_ERR "kvm: Unsupported userspace halt!");
		return 0;
	}
}
static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}
static int handle_external_interrupt(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	return 1;
}
static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	printk("VMM: %s", vcpu->arch.log_buf);
	return 1;
}
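/* Dispatch table mapping VMM exit reasons to their in-kernel handlers. */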
static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run) = {
	[EXIT_REASON_VM_PANIC]              = handle_vm_error,
	[EXIT_REASON_MMIO_INSTRUCTION]      = handle_mmio,
	[EXIT_REASON_PAL_CALL]              = handle_pal_call,
	[EXIT_REASON_SAL_CALL]              = handle_sal_call,
	[EXIT_REASON_SWITCH_RR6]            = handle_switch_rr6,
	[EXIT_REASON_VM_DESTROY]            = handle_vm_shutdown,
	[EXIT_REASON_EXTERNAL_INTERRUPT]    = handle_external_interrupt,
	[EXIT_REASON_IPI]                   = handle_ipi,
	[EXIT_REASON_PTC_G]                 = handle_global_purge,
	[EXIT_REASON_DEBUG]                 = handle_vcpu_debug,
};
static const int kvm_vti_max_exit_handlers =
		sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p_exit_data;

	p_exit_data = kvm_get_exit_data(vcpu);
	return p_exit_data->exit_reason;
}
/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	u32 exit_reason = kvm_get_exit_reason(vcpu);
	vcpu->arch.last_exit = exit_reason;

	if (exit_reason < kvm_vti_max_exit_handlers
			&& kvm_vti_exit_handlers[exit_reason])
		return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_reason;
	}
	return 0;
}
static inline void vti_set_rr6(unsigned long rr6)
{
	ia64_set_rr(RR6, rr6);
	ia64_srlz_i();
}
static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
{
	unsigned long pte;
	struct kvm *kvm = vcpu->kvm;
	int r;

	/*Insert a pair of tr to map vmm*/
	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vmm_tr_slot = r;
	/*Insert a pair of tr to map data of vm*/
	pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
					pte, KVM_VM_DATA_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vm_tr_slot = r;

#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (kvm->arch.is_sn2) {
		r = kvm_sn2_setup_mappings(vcpu);
		if (r < 0)
			goto out;
	}
#endif

	r = 0;
out:
	return r;
}
static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
	ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (kvm->arch.is_sn2)
		ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
#endif
}
static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
{
	unsigned long psr;
	int r;
	int cpu = smp_processor_id();

	if (vcpu->arch.last_run_cpu != cpu ||
			per_cpu(last_vcpu, cpu) != vcpu) {
		per_cpu(last_vcpu, cpu) = vcpu;
		vcpu->arch.last_run_cpu = cpu;
		kvm_flush_tlb_all();
	}

	vcpu->arch.host_rr6 = ia64_get_rr(RR6);
	vti_set_rr6(vcpu->arch.vmm_rr);
	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	local_irq_restore(psr);
	return r;
}
static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
{
	kvm_purge_vmm_mapping(vcpu);
	vti_set_rr6(vcpu->arch.host_rr6);
}
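/*
 * Inner run loop: with interrupts off, swap in rr6 and the VMM
 * mappings, jump through the trampoline into the VMM, then undo the
 * transition and dispatch the resulting exit.
 */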
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	union context *host_ctx, *guest_ctx;
	int r, idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

again:
	if (signal_pending(current)) {
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		goto out;
	}

	preempt_disable();
	local_irq_disable();

	/*Get host and guest context with guest address space.*/
	host_ctx = kvm_get_host_context(vcpu);
	guest_ctx = kvm_get_guest_context(vcpu);

	clear_bit(KVM_REQ_KICK, &vcpu->requests);

	r = kvm_vcpu_pre_transition(vcpu);
	if (r < 0)
		goto vcpu_run_fail;

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	kvm_guest_enter();

	/*
	 * Transition to the guest
	 */
	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);

	kvm_vcpu_post_transition(vcpu);

	vcpu->arch.launched = 1;
	set_bit(KVM_REQ_KICK, &vcpu->requests);
	local_irq_enable();

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow. The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();
	kvm_guest_exit();
	preempt_enable();

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	r = kvm_handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (!need_resched())
			goto again;
	}

out:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	if (r > 0) {
		kvm_resched(vcpu);
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		goto again;
	}

	return r;

vcpu_run_fail:
	local_irq_enable();
	preempt_enable();
	kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
	goto out;
}
static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
{
	struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);

	if (!vcpu->mmio_is_write)
		memcpy(&p->data, vcpu->mmio_data, 8);
	p->state = STATE_IORESP_READY;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		kvm_set_mmio_data(vcpu);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
	}
	r = __vcpu_run(vcpu, kvm_run);
out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}
static struct kvm *kvm_alloc_kvm(void)
{
	struct kvm *kvm;
	uint64_t vm_base;

	BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);

	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));

	if (!vm_base)
		return ERR_PTR(-ENOMEM);

	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
	kvm = (struct kvm *)(vm_base +
			offsetof(struct kvm_vm_data, kvm_vm_struct));
	kvm->arch.vm_base = vm_base;
	printk(KERN_DEBUG "kvm: vm's data area:0x%lx\n", vm_base);

	return kvm;
}
struct kvm_io_range {
	unsigned long start;
	unsigned long size;
	unsigned long type;
};

static const struct kvm_io_range io_ranges[] = {
	{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
	{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
	{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
	{IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
	{PIB_START, PIB_SIZE, GPFN_PIB},
};
*kvm
)
794 /* Mark I/O ranges */
795 for (i
= 0; i
< (sizeof(io_ranges
) / sizeof(struct kvm_io_range
));
797 for (j
= io_ranges
[i
].start
;
798 j
< io_ranges
[i
].start
+ io_ranges
[i
].size
;
800 kvm_set_pmt_entry(kvm
, j
>> PAGE_SHIFT
,
801 io_ranges
[i
].type
, 0);
/*Use unused rids to virtualize guest rid.*/
#define GUEST_PHYSICAL_RR0	0x1739
#define GUEST_PHYSICAL_RR4	0x2739
#define VMM_INIT_RR		0x1660
static void kvm_init_vm(struct kvm *kvm)
{
	BUG_ON(!kvm);

	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
	kvm->arch.vmm_init_rr = VMM_INIT_RR;

	/*
	 *Fill P2M entries for MMIO/IO ranges
	 */
	kvm_build_io_pmt(kvm);

	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
}
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kvm_alloc_kvm();

	if (IS_ERR(kvm))
		return ERR_PTR(-ENOMEM);

	kvm->arch.is_sn2 = ia64_platform_is("sn2");

	kvm_init_vm(kvm);

	return kvm;
}
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
					struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		vpd->vgr[i] = regs->vpd.vgr[i];
		vpd->vbgr[i] = regs->vpd.vbgr[i];
	}
	for (i = 0; i < 128; i++)
		vpd->vcr[i] = regs->vpd.vcr[i];
	vpd->vhpi = regs->vpd.vhpi;
	vpd->vnat = regs->vpd.vnat;
	vpd->vbnat = regs->vpd.vbnat;
	vpd->vpsr = regs->vpd.vpsr;

	vpd->vpr = regs->vpd.vpr;

	memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));

	RESTORE_REGS(mp_state);
	RESTORE_REGS(vmm_rr);
	memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
	memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
	RESTORE_REGS(itr_regions);
	RESTORE_REGS(dtr_regions);
	RESTORE_REGS(tc_regions);
	RESTORE_REGS(irq_check);
	RESTORE_REGS(itc_check);
	RESTORE_REGS(timer_check);
	RESTORE_REGS(timer_pending);
	RESTORE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		vcpu->arch.vrr[i] = regs->vrr[i];
		vcpu->arch.ibr[i] = regs->ibr[i];
		vcpu->arch.dbr[i] = regs->dbr[i];
	}
	for (i = 0; i < 4; i++)
		vcpu->arch.insvc[i] = regs->insvc[i];
	RESTORE_REGS(xtp);
	RESTORE_REGS(metaphysical_rr0);
	RESTORE_REGS(metaphysical_rr4);
	RESTORE_REGS(metaphysical_saved_rr0);
	RESTORE_REGS(metaphysical_saved_rr4);
	RESTORE_REGS(fp_psr);
	RESTORE_REGS(saved_gp);

	vcpu->arch.irq_new_pending = 1;
	vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
	set_bit(KVM_REQ_RESUME, &vcpu->requests);

	vcpu_put(vcpu);

	return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -ENOTTY;

	switch (ioctl) {
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr =
					kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm,
					&kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
		}
	case KVM_CREATE_IRQCHIP:
		r = kvm_ioapic_init(kvm);
		if (r)
			goto out;
		r = kvm_setup_default_irq_routing(kvm);
		if (r) {
			kvm_ioapic_destroy(kvm);
			goto out;
		}
		break;
	case KVM_IRQ_LINE_STATUS:
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		r = -ENXIO;
		if (irqchip_in_kernel(kvm)) {
			__s32 status;
			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
				    irq_event.irq, irq_event.level);
			if (ioctl == KVM_IRQ_LINE_STATUS) {
				r = -EFAULT;
				irq_event.status = status;
				if (copy_to_user(argp, &irq_event,
							sizeof irq_event))
					goto out;
			}
			r = 0;
		}
		break;
		}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
		}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
		}
	default:
		break;
	}
out:
	return r;
}
*vcpu
,
1039 struct kvm_sregs
*sregs
)
1044 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu
*vcpu
,
1045 struct kvm_sregs
*sregs
)
1050 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu
*vcpu
,
1051 struct kvm_translation
*tr
)
static int kvm_alloc_vmm_area(void)
{
	if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
		kvm_vmm_base = __get_free_pages(GFP_KERNEL,
				get_order(KVM_VMM_SIZE));
		if (!kvm_vmm_base)
			return -ENOMEM;

		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;

		printk(KERN_DEBUG "kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
				kvm_vmm_base, kvm_vm_buffer);
	}

	return 0;
}
static void kvm_free_vmm_area(void)
{
	if (kvm_vmm_base) {
		/*Zero this area before free to avoid bits leak!!*/
		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
		kvm_vmm_base = 0;
		kvm_vm_buffer = 0;
		kvm_vsa_base = 0;
	}
}
static int vti_init_vpd(struct kvm_vcpu *vcpu)
{
	int i;
	union cpuid3_t cpuid3;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (!vpd)
		return PTR_ERR(vpd);

	/* CPUID init */
	for (i = 0; i < 5; i++)
		vpd->vcpuid[i] = ia64_get_cpuid(i);

	/* Limit the CPUID number to 5 */
	cpuid3.value = vpd->vcpuid[3];
	cpuid3.number = 4;	/* 5 - 1 */
	vpd->vcpuid[3] = cpuid3.value;

	/*Set vac and vdc fields*/
	vpd->vac.a_from_int_cr = 1;
	vpd->vac.a_to_int_cr = 1;
	vpd->vac.a_from_psr = 1;
	vpd->vac.a_from_cpuid = 1;
	vpd->vac.a_cover = 1;
	vpd->vac.a_bsw = 1;
	vpd->vac.a_int = 1;
	vpd->vdc.d_vmsw = 1;

	/*Set virtual buffer*/
	vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;

	return 0;
}
static int vti_create_vp(struct kvm_vcpu *vcpu)
{
	long ret;
	struct vpd *vpd = vcpu->arch.vpd;
	unsigned long vmm_ivt;

	vmm_ivt = kvm_vmm_info->vmm_ivt;

	printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);

	ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);

	if (ret) {
		printk(KERN_ERR "kvm: ia64_pal_vp_create failed!\n");
		return -EINVAL;
	}
	return 0;
}
*vcpu
)
1142 ia64_ptce_info_t ptce
= {0};
1144 ia64_get_ptce(&ptce
);
1145 vcpu
->arch
.ptce_base
= ptce
.base
;
1146 vcpu
->arch
.ptce_count
[0] = ptce
.count
[0];
1147 vcpu
->arch
.ptce_count
[1] = ptce
.count
[1];
1148 vcpu
->arch
.ptce_stride
[0] = ptce
.stride
[0];
1149 vcpu
->arch
.ptce_stride
[1] = ptce
.stride
[1];
1152 static void kvm_migrate_hlt_timer(struct kvm_vcpu
*vcpu
)
1154 struct hrtimer
*p_ht
= &vcpu
->arch
.hlt_timer
;
1156 if (hrtimer_cancel(p_ht
))
1157 hrtimer_start_expires(p_ht
, HRTIMER_MODE_ABS
);
static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
{
	struct kvm_vcpu *vcpu;
	wait_queue_head_t *q;

	vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
	q = &vcpu->wq;

	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
		goto out;

	if (waitqueue_active(q))
		wake_up_interruptible(q);

out:
	vcpu->arch.timer_fired = 1;
	vcpu->arch.timer_check = 1;
	return HRTIMER_NORESTART;
}
#define PALE_RESET_ENTRY	0x80000000ffffffb0UL
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	uint64_t itc_offset;
	int i, r;
	struct kvm_vcpu *v;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	union context *p_ctx = &vcpu->arch.guest;
	struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);

	/*Init vcpu context for first run.*/
	if (IS_ERR(vmm_vcpu))
		return PTR_ERR(vmm_vcpu);

	if (kvm_vcpu_is_bsp(vcpu)) {
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		/*Set entry address for first run.*/
		regs->cr_iip = PALE_RESET_ENTRY;

		/*Initialize itc offset for vcpus*/
		itc_offset = 0UL - kvm_get_itc(vcpu);
		for (i = 0; i < KVM_MAX_VCPUS; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			v->arch.itc_offset = itc_offset;
			v->arch.last_itc = 0;
		}
	} else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	r = -ENOMEM;
	vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
	if (!vcpu->arch.apic)
		goto out;
	vcpu->arch.apic->vcpu = vcpu;

	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
	p_ctx->psr = 0x1008522000UL;
	p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
	p_ctx->caller_unat = 0;
	p_ctx->pr = 0x0;
	p_ctx->ar[36] = 0x0; /*unat*/
	p_ctx->ar[19] = 0x0; /*rnat*/
	p_ctx->ar[18] = (unsigned long)vmm_vcpu +
				((sizeof(struct kvm_vcpu)+15) & ~15);
	p_ctx->ar[64] = 0x0; /*pfs*/
	p_ctx->cr[0] = 0x7e04UL;
	p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
	p_ctx->cr[8] = 0x3c;

	/*Initialize region register*/
	p_ctx->rr[0] = 0x30;
	p_ctx->rr[1] = 0x30;
	p_ctx->rr[2] = 0x30;
	p_ctx->rr[3] = 0x30;
	p_ctx->rr[4] = 0x30;
	p_ctx->rr[5] = 0x30;
	p_ctx->rr[7] = 0x30;

	/*Initialize branch register 0*/
	p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;

	vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
	vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
	vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;

	hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	vcpu->arch.hlt_timer.function = hlt_timer_fn;

	vcpu->arch.last_run_cpu = -1;
	vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
	vcpu->arch.vsa_base = kvm_vsa_base;
	vcpu->arch.__gp = kvm_vmm_gp;
	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
	init_ptce_info(vcpu);

	r = 0;
out:
	return r;
}
static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
{
	unsigned long psr;
	int r;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	local_irq_restore(psr);
	if (r)
		goto fail;

	r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
	if (r)
		goto fail;

	r = vti_init_vpd(vcpu);
	if (r) {
		printk(KERN_DEBUG "kvm: vpd init error!!\n");
		goto uninit;
	}

	r = vti_create_vp(vcpu);
	if (r)
		goto uninit;

	kvm_purge_vmm_mapping(vcpu);

	return 0;
uninit:
	kvm_vcpu_uninit(vcpu);
fail:
	return r;
}
*kvm_arch_vcpu_create(struct kvm
*kvm
,
1306 struct kvm_vcpu
*vcpu
;
1307 unsigned long vm_base
= kvm
->arch
.vm_base
;
1311 BUG_ON(sizeof(struct kvm_vcpu
) > VCPU_STRUCT_SIZE
/2);
1314 if (id
>= KVM_MAX_VCPUS
) {
1315 printk(KERN_ERR
"kvm: Can't configure vcpus > %ld",
1322 printk(KERN_ERR
"kvm: Create vcpu[%d] error!\n", id
);
1325 vcpu
= (struct kvm_vcpu
*)(vm_base
+ offsetof(struct kvm_vm_data
,
1326 vcpu_data
[id
].vcpu_struct
));
1330 r
= vti_vcpu_setup(vcpu
, id
);
1334 printk(KERN_DEBUG
"kvm: vcpu_setup error!!\n");
1343 int kvm_arch_vcpu_setup(struct kvm_vcpu
*vcpu
)
1348 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu
*vcpu
, struct kvm_fpu
*fpu
)
1353 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu
*vcpu
, struct kvm_fpu
*fpu
)
1358 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu
*vcpu
,
1359 struct kvm_guest_debug
*dbg
)
static void free_kvm(struct kvm *kvm)
{
	unsigned long vm_base = kvm->arch.vm_base;

	if (vm_base) {
		memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
		free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
	}
}
static void kvm_release_vm_pages(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, j;
	unsigned long base_gfn;

	slots = rcu_dereference(kvm->memslots);
	for (i = 0; i < slots->nmemslots; i++) {
		memslot = &slots->memslots[i];
		base_gfn = memslot->base_gfn;

		for (j = 0; j < memslot->npages; j++) {
			if (memslot->rmap[j])
				put_page((struct page *)memslot->rmap[j]);
		}
	}
}
*kvm
)
1398 void kvm_arch_destroy_vm(struct kvm
*kvm
)
1400 kvm_iommu_unmap_guest(kvm
);
1401 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
1402 kvm_free_all_assigned_devices(kvm
);
1404 kfree(kvm
->arch
.vioapic
);
1405 kvm_release_vm_pages(kvm
);
1406 kvm_free_physmem(kvm
);
1407 cleanup_srcu_struct(&kvm
->srcu
);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	if (cpu != vcpu->cpu) {
		vcpu->cpu = cpu;
		if (vcpu->arch.ht_active)
			kvm_migrate_hlt_timer(vcpu);
	}
}
#define SAVE_REGS(_x)	regs->_x = vcpu->arch._x
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		regs->vpd.vgr[i] = vpd->vgr[i];
		regs->vpd.vbgr[i] = vpd->vbgr[i];
	}
	for (i = 0; i < 128; i++)
		regs->vpd.vcr[i] = vpd->vcr[i];
	regs->vpd.vhpi = vpd->vhpi;
	regs->vpd.vnat = vpd->vnat;
	regs->vpd.vbnat = vpd->vbnat;
	regs->vpd.vpsr = vpd->vpsr;
	regs->vpd.vpr = vpd->vpr;

	memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));

	SAVE_REGS(mp_state);
	SAVE_REGS(vmm_rr);
	memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
	memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
	SAVE_REGS(itr_regions);
	SAVE_REGS(dtr_regions);
	SAVE_REGS(tc_regions);
	SAVE_REGS(irq_check);
	SAVE_REGS(itc_check);
	SAVE_REGS(timer_check);
	SAVE_REGS(timer_pending);
	SAVE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		regs->vrr[i] = vcpu->arch.vrr[i];
		regs->ibr[i] = vcpu->arch.ibr[i];
		regs->dbr[i] = vcpu->arch.dbr[i];
	}
	for (i = 0; i < 4; i++)
		regs->insvc[i] = vcpu->arch.insvc[i];
	regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
	SAVE_REGS(xtp);
	SAVE_REGS(metaphysical_rr0);
	SAVE_REGS(metaphysical_rr4);
	SAVE_REGS(metaphysical_saved_rr0);
	SAVE_REGS(metaphysical_saved_rr4);
	SAVE_REGS(fp_psr);
	SAVE_REGS(saved_gp);

	vcpu_put(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
				  struct kvm_ia64_vcpu_stack *stack)
{
	memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
				  struct kvm_ia64_vcpu_stack *stack)
{
	memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
	       sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));

	vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
	return 0;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.hlt_timer);
	kfree(vcpu->arch.apic);
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_ia64_vcpu_stack *stack = NULL;
	long r;

	switch (ioctl) {
	case KVM_IA64_VCPU_GET_STACK: {
		struct kvm_ia64_vcpu_stack __user *user_stack;
		void __user *first_p = argp;

		r = -EFAULT;
		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
			goto out;

		if (!access_ok(VERIFY_WRITE, user_stack,
			       sizeof(struct kvm_ia64_vcpu_stack))) {
			printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
			       "Illegal user destination address for stack\n");
			goto out;
		}
		stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
		if (!stack) {
			r = -ENOMEM;
			goto out;
		}

		r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
		if (r)
			goto out;

		if (copy_to_user(user_stack, stack,
				 sizeof(struct kvm_ia64_vcpu_stack))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_IA64_VCPU_SET_STACK: {
		struct kvm_ia64_vcpu_stack __user *user_stack;
		void __user *first_p = argp;

		r = -EFAULT;
		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
			goto out;

		if (!access_ok(VERIFY_READ, user_stack,
			       sizeof(struct kvm_ia64_vcpu_stack))) {
			printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
			       "Illegal user address for stack\n");
			goto out;
		}
		stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
		if (!stack) {
			r = -ENOMEM;
			goto out;
		}
		if (copy_from_user(stack, user_stack,
				   sizeof(struct kvm_ia64_vcpu_stack)))
			goto out;

		r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
		break;
	}
	default:
		r = -EINVAL;
	}

out:
	kfree(stack);
	return r;
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
		struct kvm_memory_slot *memslot,
		struct kvm_memory_slot old,
		struct kvm_userspace_memory_region *mem,
		int user_alloc)
{
	unsigned long i;
	unsigned long pfn;
	int npages = memslot->npages;
	unsigned long base_gfn = memslot->base_gfn;

	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pfn = gfn_to_pfn(kvm, base_gfn + i);
		if (!kvm_is_mmio_pfn(pfn)) {
			kvm_set_pmt_entry(kvm, base_gfn + i,
					pfn << PAGE_SHIFT,
				_PAGE_AR_RWX | _PAGE_MA_WB);
			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
		} else {
			kvm_set_pmt_entry(kvm, base_gfn + i,
					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
					_PAGE_MA_UC);
			memslot->rmap[i] = 0;
		}
	}

	return 0;
}
*kvm
,
1612 struct kvm_userspace_memory_region
*mem
,
1613 struct kvm_memory_slot old
,
1619 void kvm_arch_flush_shadow(struct kvm
*kvm
)
1621 kvm_flush_remote_tlbs(kvm
);
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_uninit(vcpu);
}
static int vti_cpu_has_kvm_support(void)
{
	long avail = 1, status = 1, control = 1;
	long ret;

	ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
	if (ret)
		goto out;

	if (!(avail & PAL_PROC_VM_BIT))
		goto out;

	printk(KERN_DEBUG "kvm: Hardware Supports VT\n");

	ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
	if (ret)
		goto out;
	printk(KERN_DEBUG "kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);

	if (!(vp_env_info & VP_OPCODE)) {
		printk(KERN_WARNING "kvm: No opcode ability on hardware, "
				"vm_env_info:0x%lx\n", vp_env_info);
	}

	return 1;
out:
	return 0;
}
/*
 * On SN2, the ITC isn't stable, so copy in fast path code to use the
 * SN2 RTC, replacing the ITC based default version.
 */
static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
			  struct module *module)
{
	unsigned long new_ar, new_ar_sn2;
	unsigned long module_base;

	if (!ia64_platform_is("sn2"))
		return;

	module_base = (unsigned long)module->module_core;

	new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
	new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;

	printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
	       "as source\n");

	/*
	 * Copy the SN2 version of mov_ar into place. They are both
	 * the same size, so 6 bundles is sufficient (6 * 0x10).
	 */
	memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
}
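/*
 * Copy the VMM module text into the pinned VMM area and rewrite
 * kvm_vmm_info and the IA-64 function descriptors (ip/gp) of the entry
 * points so that they refer to the relocated copy.
 */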
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
			    struct module *module)
{
	unsigned long module_base;
	unsigned long vmm_size;

	unsigned long vmm_offset, func_offset, fdesc_offset;
	struct fdesc *p_fdesc;

	BUG_ON(!module);

	if (!kvm_vmm_base) {
		printk("kvm: kvm area hasn't been initialized yet!!\n");
		return -EFAULT;
	}

	/*Calculate new position of relocated vmm module.*/
	module_base = (unsigned long)module->module_core;
	vmm_size = module->core_size;
	if (unlikely(vmm_size > KVM_VMM_SIZE))
		return -EFAULT;

	memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
	kvm_patch_vmm(vmm_info, module);
	kvm_flush_icache(kvm_vmm_base, vmm_size);

	/*Recalculate kvm_vmm_info based on new VMM*/
	vmm_offset = vmm_info->vmm_ivt - module_base;
	kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
	printk(KERN_DEBUG "kvm: Relocated VMM's IVT Base Addr:%lx\n",
			kvm_vmm_info->vmm_ivt);

	fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
	kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
							fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	printk(KERN_DEBUG "kvm: Relocated VMM's Init Entry Addr:%lx\n",
			KVM_VMM_BASE + func_offset);

	fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
	kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
							fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	kvm_vmm_gp = p_fdesc->gp;

	printk(KERN_DEBUG "kvm: Relocated VMM's Entry IP:%p\n",
			kvm_vmm_info->vmm_entry);
	printk(KERN_DEBUG "kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
			KVM_VMM_BASE + func_offset);

	return 0;
}
int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;

	if (!vti_cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	if (kvm_vmm_info) {
		printk(KERN_ERR "kvm: Already loaded VMM module!\n");
		r = -EEXIST;
		goto out;
	}

	r = -ENOMEM;
	kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
	if (!kvm_vmm_info)
		goto out;

	if (kvm_alloc_vmm_area())
		goto out_free0;

	r = kvm_relocate_vmm(vmm_info, vmm_info->module);
	if (r)
		goto out_free1;

	return 0;

out_free1:
	kvm_free_vmm_area();
out_free0:
	kfree(kvm_vmm_info);
	kvm_vmm_info = NULL;
out:
	return r;
}
void kvm_arch_exit(void)
{
	kvm_free_vmm_area();
	kfree(kvm_vmm_info);
	kvm_vmm_info = NULL;
}
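/*
 * The VMM marks dirty pages in a log kept in the shared VM data area;
 * copy those bits into the memslot's dirty bitmap and clear them.
 */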
static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	long base;
	unsigned long n;
	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);
	base = memslot->base_gfn / BITS_PER_LONG;

	for (i = 0; i < n/sizeof(long); ++i) {
		memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
		dirty_bitmap[base + i] = 0;
	}
	r = 0;
out:
	return r;
}
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);
	spin_lock(&kvm->arch.dirty_log_lock);

	r = kvm_ia64_sync_dirty_log(kvm, log);
	if (r)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots->memslots[log->slot];
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	spin_unlock(&kvm->arch.dirty_log_lock);
	return r;
}
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);

	me = get_cpu();
	if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
		if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
			smp_send_reschedule(cpu);
	put_cpu();
}
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
{
	return __apic_accept_irq(vcpu, irq->vector);
}
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
	return apic->vcpu->vcpu_id == dest;
}
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
	return 0;
}
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.xtp - vcpu2->arch.xtp;
}
int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
		int short_hand, int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	return (dest_mode == 0) ?
		kvm_apic_match_physical_addr(target, dest) :
		kvm_apic_match_logical_addr(target, dest);
}
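/*
 * Scan the 256-bit irr from the highest word down; fls() gives the
 * highest set bit within a word.
 */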
static int find_highest_bits(int *dat)
{
	u32 bits, bitnum;
	int i;

	/* loop for all 256 bits */
	for (i = 7; i >= 0 ; i--) {
		bits = dat[i];
		if (bits) {
			bitnum = fls(bits);
			return i * 32 + bitnum - 1;
		}
	}

	return -1;
}
int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (vpd->irr[0] & (1UL << NMI_VECTOR))
		return NMI_VECTOR;
	if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
		return ExtINT_VECTOR;

	return find_highest_bits((int *)&vpd->irr[0]);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.timer_fired;
}
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
		(kvm_highest_pending_irq(vcpu) != -1);
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}
static int vcpu_reset(struct kvm_vcpu *vcpu)
{
	int r;
	long psr;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	local_irq_restore(psr);
	if (r)
		goto fail;

	vcpu->arch.launched = 0;
	kvm_arch_vcpu_uninit(vcpu);
	r = kvm_arch_vcpu_init(vcpu);
	if (r)
		goto fail;

	kvm_purge_vmm_mapping(vcpu);
	r = 0;
fail:
	return r;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int r = 0;

	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
		r = vcpu_reset(vcpu);
	vcpu_put(vcpu);
	return r;
}