/*
 * kvm_ia64.c: Basic KVM support on Itanium series processors
 *
 * Copyright (C) 2007, Intel Corporation.
 *	Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/bitops.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

#include <asm/pgtable.h>
#include <asm/gcc_intrin.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/sn/addrs.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/shub_mmr.h>
static unsigned long kvm_vmm_base;
static unsigned long kvm_vsa_base;
static unsigned long kvm_vm_buffer;
static unsigned long kvm_vm_buffer_size;
unsigned long kvm_vmm_gp;

static long vp_env_info;

static struct kvm_vmm_info *kvm_vmm_info;

static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ NULL }
};
static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (vcpu->kvm->arch.is_sn2)
		return rtc_time();
	else
#endif
		return ia64_getreg(_IA64_REG_AR_ITC);
}
static void kvm_flush_icache(unsigned long start, unsigned long len)
{
	int l;

	for (l = 0; l < (len + 32); l += 32)
		ia64_fc((void *)(start + l));

	ia64_sync_i();
	ia64_srlz_i();
}
static void kvm_flush_tlb_all(void)
{
	unsigned long i, j, count0, count1, stride0, stride1, addr;
	unsigned long flags;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();	/* srlz.i implies srlz.d */
}
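/*
 * A sketch of the purge pattern above, assuming PAL reported
 * count = {2, 3}: addr starts at ptce_base, advances by stride1 after
 * every ptc.e, and by an additional stride0 at the end of each inner
 * row, so count0 * count1 (here 6) purges cover the whole local TLB.
 * The counts and strides come from PAL_PTCE_INFO via local_cpu_data.
 */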
long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
{
	struct ia64_pal_retval iprv;

	PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
			(u64)opt_handler);

	return iprv.status;
}
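/*
 * PAL_VP_CREATE is a stacked PAL call (hence PAL_CALL_STK rather than
 * PAL_CALL): it registers the per-vcpu VPD with firmware, together
 * with the IVT the VMM runs on (host_iva) and an optional handler,
 * and hands back its result in iprv.status.
 */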
static DEFINE_SPINLOCK(vp_lock);
void kvm_arch_hardware_enable(void *garbage)
{
	long status;
	long tmp_base;
	unsigned long pte;
	unsigned long saved_psr;
	int slot;

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	spin_lock(&vp_lock);
	status = ia64_pal_vp_init_env(kvm_vsa_base ?
				VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
	if (status != 0) {
		spin_unlock(&vp_lock);
		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
		return;
	}

	if (!kvm_vsa_base) {
		kvm_vsa_base = tmp_base;
		printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
	}
	spin_unlock(&vp_lock);
	ia64_ptr_entry(0x3, slot);
}
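/*
 * Enabling VT on a cpu is thus a three-step dance: temporarily pin the
 * VMM area with a translation register so PAL can reach it, call
 * ia64_pal_vp_init_env() (VP_INIT_ENV_INITALIZE only for the first cpu,
 * which also hands back the shared VSA base), then drop the temporary
 * mapping again with ia64_ptr_entry().
 */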
void kvm_arch_hardware_disable(void *garbage)
{
	long status;
	int slot;
	unsigned long pte;
	unsigned long saved_psr;
	unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
				PAGE_KERNEL));

	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	status = ia64_pal_vp_exit_env(host_iva);
	if (status)
		printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
				status);
	ia64_ptr_entry(0x3, slot);
}
void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IRQ_INJECT_STATUS:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
	}
	return r;
}
static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	kvm_run->hw.hardware_exit_reason = 1;
	return 0;
}
static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct kvm_mmio_req *p;
	struct kvm_io_device *mmio_dev;
	int r;

	p = kvm_get_vcpu_ioreq(vcpu);

	if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
		goto mmio;
	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
	vcpu->mmio_size = kvm_run->mmio.len = p->size;
	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;

	if (vcpu->mmio_is_write)
		memcpy(vcpu->mmio_data, &p->data, p->size);
	memcpy(kvm_run->mmio.data, &p->data, p->size);
	kvm_run->exit_reason = KVM_EXIT_MMIO;
	return 0;
mmio:
	if (p->dir)
		r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr,
				    p->size, &p->data);
	else
		r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr,
				     p->size, &p->data);
	if (r)
		printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
	p->state = STATE_IORESP_READY;

	return 1;
}
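/*
 * Only IOSAPIC addresses are handled on the in-kernel io bus above;
 * everything else becomes a KVM_EXIT_MMIO. Userspace then completes the
 * access roughly like this (a sketch against the kvm_run ABI, not code
 * from this file; dev_read() stands in for the emulated device):
 *
 *	struct kvm_run *run = ...;	(mmap'ed vcpu fd)
 *	if (run->exit_reason == KVM_EXIT_MMIO && !run->mmio.is_write)
 *		dev_read(run->mmio.phys_addr, run->mmio.data,
 *			 run->mmio.len);
 *	ioctl(vcpu_fd, KVM_RUN, 0);	(re-enter the vcpu)
 *
 * On re-entry, mmio_needed makes kvm_arch_vcpu_ioctl_run() call
 * kvm_set_mmio_data() to copy the reply back into the vcpu's ioreq.
 */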
static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_PAL_CALL)
		return kvm_pal_emul(vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 2;
		return 0;
	}
}
static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
		kvm_sal_emul(vcpu);
		return 1;
	} else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 3;
		return 0;
	}
}
static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (!test_and_set_bit(vector, &vpd->irr[0])) {
		vcpu->arch.irq_new_pending = 1;
		kvm_vcpu_kick(vcpu);
		return 1;
	}
	return 0;
}
/*
 *  offset: address offset to IPI space.
 *  value:  deliver value.
 */
static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
				uint64_t vector)
{
	switch (dm) {
	case SAPIC_FIXED:
		break;
	case SAPIC_NMI:
		vector = 2;
		break;
	case SAPIC_EXTINT:
		vector = 0;
		break;
	case SAPIC_INIT:
	case SAPIC_PMI:
	default:
		printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
		return;
	}
	__apic_accept_irq(vcpu, vector);
}
static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
			unsigned long eid)
{
	union ia64_lid lid;
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		lid.val = VCPU_LID(vcpu);
		if (lid.id == id && lid.eid == eid)
			return vcpu;
	}

	return NULL;
}
static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm_vcpu *target_vcpu;
	struct kvm_pt_regs *regs;
	union ia64_ipi_a addr = p->u.ipi_data.addr;
	union ia64_ipi_d data = p->u.ipi_data.data;

	target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
	if (!target_vcpu)
		return handle_vm_error(vcpu, kvm_run);

	if (!target_vcpu->arch.launched) {
		regs = vcpu_regs(target_vcpu);

		regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
		regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;

		target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		if (waitqueue_active(&target_vcpu->wq))
			wake_up_interruptible(&target_vcpu->wq);
	} else {
		vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
		if (target_vcpu != vcpu)
			kvm_vcpu_kick(target_vcpu);
	}

	return 1;
}
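/*
 * Note the wakeup path above: the first IPI to a vcpu that has never
 * run doubles as the boot signal, so instead of delivering a vector it
 * points cr.iip/r1 at the boot entry recorded in the rendezvous SAL
 * data and marks the target runnable.
 */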
struct call_data {
	struct kvm_ptc_g ptc_g_data;
	struct kvm_vcpu *vcpu;
};
static void vcpu_global_purge(void *info)
{
	struct call_data *p = (struct call_data *)info;
	struct kvm_vcpu *vcpu = p->vcpu;

	if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
		return;

	set_bit(KVM_REQ_PTC_G, &vcpu->requests);
	if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
		vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
							p->ptc_g_data;
	} else {
		clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
		vcpu->arch.ptc_g_count = 0;
		set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
	}
}
static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm *kvm = vcpu->kvm;
	struct call_data call_data;
	int i;
	struct kvm_vcpu *vcpui;

	call_data.ptc_g_data = p->u.ptc_g_data;

	kvm_for_each_vcpu(i, vcpui, kvm) {
		if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
				vcpu == vcpui)
			continue;

		if (waitqueue_active(&vcpui->wq))
			wake_up_interruptible(&vcpui->wq);

		if (vcpui->cpu != -1) {
			call_data.vcpu = vcpui;
			smp_call_function_single(vcpui->cpu,
					vcpu_global_purge, &call_data, 1);
		} else
			printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
	}
	return 1;
}
static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}
static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
{
	unsigned long pte, rtc_phys_addr, map_addr;
	int slot;

	map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
	rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
	pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
	slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
	vcpu->arch.sn_rtc_tr_slot = slot;
	if (slot < 0) {
		printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
		slot = 0;
	}
	return slot;
}
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	ktime_t kt;
	long itc_diff;
	unsigned long vcpu_now_itc;
	unsigned long expires;
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (irqchip_in_kernel(vcpu->kvm)) {

		vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;

		if (time_after(vcpu_now_itc, vpd->itm)) {
			vcpu->arch.timer_check = 1;
			return 1;
		}
		itc_diff = vpd->itm - vcpu_now_itc;
		if (itc_diff < 0)
			itc_diff = -itc_diff;

		expires = div64_u64(itc_diff, cyc_per_usec);
		kt = ktime_set(0, 1000 * expires);

		vcpu->arch.ht_active = 1;
		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		hrtimer_cancel(p_ht);
		vcpu->arch.ht_active = 0;

		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
				kvm_cpu_has_pending_timer(vcpu))
			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
				vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		printk(KERN_ERR"kvm: Unsupported userspace halt!");
		return 0;
	}
}
static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}
static int handle_external_interrupt(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	return 1;
}
static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	printk("VMM: %s", vcpu->arch.log_buf);
	return 1;
}
static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run) = {
	[EXIT_REASON_VM_PANIC]              = handle_vm_error,
	[EXIT_REASON_MMIO_INSTRUCTION]      = handle_mmio,
	[EXIT_REASON_PAL_CALL]              = handle_pal_call,
	[EXIT_REASON_SAL_CALL]              = handle_sal_call,
	[EXIT_REASON_SWITCH_RR6]            = handle_switch_rr6,
	[EXIT_REASON_VM_DESTROY]            = handle_vm_shutdown,
	[EXIT_REASON_EXTERNAL_INTERRUPT]    = handle_external_interrupt,
	[EXIT_REASON_IPI]                   = handle_ipi,
	[EXIT_REASON_PTC_G]                 = handle_global_purge,
	[EXIT_REASON_DEBUG]                 = handle_vcpu_debug,
};
static const int kvm_vti_max_exit_handlers =
		sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p_exit_data;

	p_exit_data = kvm_get_exit_data(vcpu);
	return p_exit_data->exit_reason;
}
/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	u32 exit_reason = kvm_get_exit_reason(vcpu);
	vcpu->arch.last_exit = exit_reason;

	if (exit_reason < kvm_vti_max_exit_handlers
			&& kvm_vti_exit_handlers[exit_reason])
		return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_reason;
	}
	return 0;
}
static inline void vti_set_rr6(unsigned long rr6)
{
	ia64_set_rr(RR6, rr6);
	ia64_srlz_i();
}
static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
{
	unsigned long pte;
	struct kvm *kvm = vcpu->kvm;
	int r;

	/*Insert a pair of tr to map vmm*/
	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vmm_tr_slot = r;
	/*Insert a pair of tr to map data of vm*/
	pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
					pte, KVM_VM_DATA_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vm_tr_slot = r;

#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (kvm->arch.is_sn2) {
		r = kvm_sn2_setup_mappings(vcpu);
		if (r < 0)
			goto out;
	}
#endif

	r = 0;
out:
	return r;
}
static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
	ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (kvm->arch.is_sn2)
		ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
#endif
}
static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
{
	unsigned long psr;
	int r;
	int cpu = smp_processor_id();

	if (vcpu->arch.last_run_cpu != cpu ||
			per_cpu(last_vcpu, cpu) != vcpu) {
		per_cpu(last_vcpu, cpu) = vcpu;
		vcpu->arch.last_run_cpu = cpu;
		kvm_flush_tlb_all();
	}

	vcpu->arch.host_rr6 = ia64_get_rr(RR6);
	vti_set_rr6(vcpu->arch.vmm_rr);
	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	local_irq_restore(psr);
	return r;
}
static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
{
	kvm_purge_vmm_mapping(vcpu);
	vti_set_rr6(vcpu->arch.host_rr6);
}
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	union context *host_ctx, *guest_ctx;
	int r;

	/*
	 * down_read() may sleep and return with interrupts enabled
	 */
	down_read(&vcpu->kvm->slots_lock);

again:
	if (signal_pending(current)) {
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		goto out;
	}

	preempt_disable();
	local_irq_disable();

	/*Get host and guest context with guest address space.*/
	host_ctx = kvm_get_host_context(vcpu);
	guest_ctx = kvm_get_guest_context(vcpu);

	clear_bit(KVM_REQ_KICK, &vcpu->requests);

	r = kvm_vcpu_pre_transition(vcpu);
	if (r < 0)
		goto vcpu_run_fail;

	up_read(&vcpu->kvm->slots_lock);
	kvm_guest_enter();

	/*
	 * Transition to the guest
	 */
	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);

	kvm_vcpu_post_transition(vcpu);

	vcpu->arch.launched = 1;
	set_bit(KVM_REQ_KICK, &vcpu->requests);
	local_irq_enable();

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow. The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();
	kvm_guest_exit();
	preempt_enable();

	down_read(&vcpu->kvm->slots_lock);

	r = kvm_handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (!need_resched())
			goto again;
	}

out:
	up_read(&vcpu->kvm->slots_lock);
	if (r > 0) {
		kvm_resched(vcpu);
		down_read(&vcpu->kvm->slots_lock);
		goto again;
	}

	return r;

vcpu_run_fail:
	local_irq_enable();
	preempt_enable();
	kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
	goto out;
}
static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
{
	struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);

	if (!vcpu->mmio_is_write)
		memcpy(&p->data, vcpu->mmio_data, 8);
	p->state = STATE_IORESP_READY;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		kvm_set_mmio_data(vcpu);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
	}
	r = __vcpu_run(vcpu, kvm_run);
out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}
static struct kvm *kvm_alloc_kvm(void)
{
	struct kvm *kvm;
	uint64_t vm_base;

	BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);

	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));

	if (!vm_base)
		return ERR_PTR(-ENOMEM);

	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
	kvm = (struct kvm *)(vm_base +
			offsetof(struct kvm_vm_data, kvm_vm_struct));
	kvm->arch.vm_base = vm_base;
	printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);

	return kvm;
}
struct kvm_io_range {
	unsigned long start;
	unsigned long size;
	unsigned long type;
};

static const struct kvm_io_range io_ranges[] = {
	{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
	{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
	{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
	{IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
	{PIB_START, PIB_SIZE, GPFN_PIB},
};
static void kvm_build_io_pmt(struct kvm *kvm)
{
	unsigned long i, j;

	/* Mark I/O ranges */
	for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
							i++) {
		for (j = io_ranges[i].start;
				j < io_ranges[i].start + io_ranges[i].size;
				j += PAGE_SIZE)
			kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
					io_ranges[i].type, 0);
	}
}
/*Use unused rids to virtualize guest rid.*/
#define GUEST_PHYSICAL_RR0	0x1739
#define GUEST_PHYSICAL_RR4	0x2739
#define VMM_INIT_RR		0x1660
static void kvm_init_vm(struct kvm *kvm)
{
	BUG_ON(!kvm);

	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
	kvm->arch.vmm_init_rr = VMM_INIT_RR;

	/*
	 *Fill P2M entries for MMIO/IO ranges
	 */
	kvm_build_io_pmt(kvm);

	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
}
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kvm_alloc_kvm();

	if (IS_ERR(kvm))
		return ERR_PTR(-ENOMEM);

	kvm->arch.is_sn2 = ia64_platform_is("sn2");

	kvm_init_vm(kvm);

	return kvm;
}
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
					struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
				sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
*kvm
, struct kvm_irqchip
*chip
)
869 switch (chip
->chip_id
) {
870 case KVM_IRQCHIP_IOAPIC
:
871 memcpy(ioapic_irqchip(kvm
),
873 sizeof(struct kvm_ioapic_state
));
#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x
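/*
 * RESTORE_REGS just mirrors a field from the userspace kvm_regs copy
 * into the vcpu, e.g. RESTORE_REGS(mp_state) expands to
 *
 *	vcpu->arch.mp_state = regs->mp_state;
 *
 * SAVE_REGS (further down) is the same pattern in the other direction.
 */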
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		vpd->vgr[i] = regs->vpd.vgr[i];
		vpd->vbgr[i] = regs->vpd.vbgr[i];
	}
	for (i = 0; i < 128; i++)
		vpd->vcr[i] = regs->vpd.vcr[i];
	vpd->vhpi = regs->vpd.vhpi;
	vpd->vnat = regs->vpd.vnat;
	vpd->vbnat = regs->vpd.vbnat;
	vpd->vpsr = regs->vpd.vpsr;

	vpd->vpr = regs->vpd.vpr;

	memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));

	RESTORE_REGS(mp_state);
	RESTORE_REGS(vmm_rr);
	memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
	memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
	RESTORE_REGS(itr_regions);
	RESTORE_REGS(dtr_regions);
	RESTORE_REGS(tc_regions);
	RESTORE_REGS(irq_check);
	RESTORE_REGS(itc_check);
	RESTORE_REGS(timer_check);
	RESTORE_REGS(timer_pending);
	RESTORE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		vcpu->arch.vrr[i] = regs->vrr[i];
		vcpu->arch.ibr[i] = regs->ibr[i];
		vcpu->arch.dbr[i] = regs->dbr[i];
	}
	for (i = 0; i < 4; i++)
		vcpu->arch.insvc[i] = regs->insvc[i];
	RESTORE_REGS(xtp);
	RESTORE_REGS(metaphysical_rr0);
	RESTORE_REGS(metaphysical_rr4);
	RESTORE_REGS(metaphysical_saved_rr0);
	RESTORE_REGS(metaphysical_saved_rr4);
	RESTORE_REGS(fp_psr);
	RESTORE_REGS(saved_gp);

	vcpu->arch.irq_new_pending = 1;
	vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
	set_bit(KVM_REQ_RESUME, &vcpu->requests);

	vcpu_put(vcpu);

	return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -ENOTTY;

	switch (ioctl) {
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr =
					kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm,
					&kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
		}
	case KVM_CREATE_IRQCHIP:
		r = kvm_ioapic_init(kvm);
		if (r)
			goto out;
		r = kvm_setup_default_irq_routing(kvm);
		if (r) {
			kfree(kvm->arch.vioapic);
			goto out;
		}
		break;
	case KVM_IRQ_LINE_STATUS:
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			__s32 status;
			mutex_lock(&kvm->irq_lock);
			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
				    irq_event.irq, irq_event.level);
			mutex_unlock(&kvm->irq_lock);
			if (ioctl == KVM_IRQ_LINE_STATUS) {
				irq_event.status = status;
				if (copy_to_user(argp, &irq_event,
							sizeof irq_event))
					goto out;
			}
			r = 0;
		}
		break;
		}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
		}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
		}
	default:
		;
	}
out:
	return r;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
		struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
		struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
		struct kvm_translation *tr)
{
	return -EINVAL;
}
static int kvm_alloc_vmm_area(void)
{
	if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
		kvm_vmm_base = __get_free_pages(GFP_KERNEL,
				get_order(KVM_VMM_SIZE));
		if (!kvm_vmm_base)
			return -ENOMEM;

		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;

		printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
				kvm_vmm_base, kvm_vm_buffer);
	}

	return 0;
}
static void kvm_free_vmm_area(void)
{
	if (kvm_vmm_base) {
		/*Zero this area before free to avoid bits leak!!*/
		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
		kvm_vmm_base = 0;
		kvm_vm_buffer = 0;
		kvm_vsa_base = 0;
	}
}
static int vti_init_vpd(struct kvm_vcpu *vcpu)
{
	int i;
	union cpuid3_t cpuid3;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (!vpd)
		return PTR_ERR(vpd);

	/* CPUID init */
	for (i = 0; i < 5; i++)
		vpd->vcpuid[i] = ia64_get_cpuid(i);

	/* Limit the CPUID number to 5 */
	cpuid3.value = vpd->vcpuid[3];
	cpuid3.number = 4;	/* 5 - 1 */
	vpd->vcpuid[3] = cpuid3.value;

	/*Set vac and vdc fields*/
	vpd->vac.a_from_int_cr = 1;
	vpd->vac.a_to_int_cr = 1;
	vpd->vac.a_from_psr = 1;
	vpd->vac.a_from_cpuid = 1;
	vpd->vac.a_cover = 1;
	vpd->vdc.d_vmsw = 1;

	/*Set virtual buffer*/
	vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;

	return 0;
}
static int vti_create_vp(struct kvm_vcpu *vcpu)
{
	long ret;
	struct vpd *vpd = vcpu->arch.vpd;
	unsigned long vmm_ivt;

	vmm_ivt = kvm_vmm_info->vmm_ivt;

	printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);

	ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);

	if (ret) {
		printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
		return -EINVAL;
	}
	return 0;
}
static void init_ptce_info(struct kvm_vcpu *vcpu)
{
	ia64_ptce_info_t ptce = {0};

	ia64_get_ptce(&ptce);
	vcpu->arch.ptce_base = ptce.base;
	vcpu->arch.ptce_count[0] = ptce.count[0];
	vcpu->arch.ptce_count[1] = ptce.count[1];
	vcpu->arch.ptce_stride[0] = ptce.stride[0];
	vcpu->arch.ptce_stride[1] = ptce.stride[1];
}
static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;

	if (hrtimer_cancel(p_ht))
		hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
}
static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
{
	struct kvm_vcpu *vcpu;
	wait_queue_head_t *q;

	vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
	q = &vcpu->wq;

	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
		goto out;

	if (waitqueue_active(q))
		wake_up_interruptible(q);

out:
	vcpu->arch.timer_fired = 1;
	vcpu->arch.timer_check = 1;
	return HRTIMER_NORESTART;
}
#define PALE_RESET_ENTRY	0x80000000ffffffb0UL
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;
	int r;
	int i;
	long itc_offset;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	union context *p_ctx = &vcpu->arch.guest;
	struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);

	/*Init vcpu context for first run.*/
	if (IS_ERR(vmm_vcpu))
		return PTR_ERR(vmm_vcpu);

	if (kvm_vcpu_is_bsp(vcpu)) {
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		/*Set entry address for first run.*/
		regs->cr_iip = PALE_RESET_ENTRY;

		/*Initialize itc offset for vcpus*/
		itc_offset = 0UL - kvm_get_itc(vcpu);
		for (i = 0; i < KVM_MAX_VCPUS; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			v->arch.itc_offset = itc_offset;
			v->arch.last_itc = 0;
		}
	} else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	r = -ENOMEM;
	vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
	if (!vcpu->arch.apic)
		goto out;
	vcpu->arch.apic->vcpu = vcpu;

	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
	p_ctx->psr = 0x1008522000UL;
	p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
	p_ctx->caller_unat = 0;
	p_ctx->pr = 0x0;
	p_ctx->ar[36] = 0x0; /*unat*/
	p_ctx->ar[19] = 0x0; /*rnat*/
	p_ctx->ar[18] = (unsigned long)vmm_vcpu +
			((sizeof(struct kvm_vcpu)+15) & ~15);
	p_ctx->ar[64] = 0x0; /*pfs*/
	p_ctx->cr[0] = 0x7e04UL;
	p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
	p_ctx->cr[8] = 0x3c;

	/*Initialize region registers*/
	p_ctx->rr[0] = 0x30;
	p_ctx->rr[1] = 0x30;
	p_ctx->rr[2] = 0x30;
	p_ctx->rr[3] = 0x30;
	p_ctx->rr[4] = 0x30;
	p_ctx->rr[5] = 0x30;
	p_ctx->rr[7] = 0x30;

	/*Initialize branch register 0*/
	p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;

	vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
	vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
	vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;

	hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	vcpu->arch.hlt_timer.function = hlt_timer_fn;

	vcpu->arch.last_run_cpu = -1;
	vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
	vcpu->arch.vsa_base = kvm_vsa_base;
	vcpu->arch.__gp = kvm_vmm_gp;
	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
	init_ptce_info(vcpu);

	r = 0;
out:
	return r;
}
static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
{
	int r;
	unsigned long psr;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	local_irq_restore(psr);
	if (r)
		goto fail;

	r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
	if (r)
		goto fail;

	r = vti_init_vpd(vcpu);
	if (r) {
		printk(KERN_DEBUG"kvm: vpd init error!!\n");
		goto uninit;
	}

	r = vti_create_vp(vcpu);
	if (r)
		goto uninit;

	kvm_purge_vmm_mapping(vcpu);

	return 0;
uninit:
	kvm_vcpu_uninit(vcpu);
fail:
	return r;
}
*kvm_arch_vcpu_create(struct kvm
*kvm
,
1312 struct kvm_vcpu
*vcpu
;
1313 unsigned long vm_base
= kvm
->arch
.vm_base
;
1317 BUG_ON(sizeof(struct kvm_vcpu
) > VCPU_STRUCT_SIZE
/2);
1320 if (id
>= KVM_MAX_VCPUS
) {
1321 printk(KERN_ERR
"kvm: Can't configure vcpus > %ld",
1328 printk(KERN_ERR
"kvm: Create vcpu[%d] error!\n", id
);
1331 vcpu
= (struct kvm_vcpu
*)(vm_base
+ offsetof(struct kvm_vm_data
,
1332 vcpu_data
[id
].vcpu_struct
));
1336 r
= vti_vcpu_setup(vcpu
, id
);
1340 printk(KERN_DEBUG
"kvm: vcpu_setup error!!\n");
1349 int kvm_arch_vcpu_setup(struct kvm_vcpu
*vcpu
)
1354 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu
*vcpu
, struct kvm_fpu
*fpu
)
1359 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu
*vcpu
, struct kvm_fpu
*fpu
)
1364 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu
*vcpu
,
1365 struct kvm_guest_debug
*dbg
)
static void free_kvm(struct kvm *kvm)
{
	unsigned long vm_base = kvm->arch.vm_base;

	if (vm_base) {
		memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
		free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
	}
}
static void kvm_release_vm_pages(struct kvm *kvm)
{
	struct kvm_memory_slot *memslot;
	int i, j;
	unsigned long base_gfn;

	for (i = 0; i < kvm->nmemslots; i++) {
		memslot = &kvm->memslots[i];
		base_gfn = memslot->base_gfn;

		for (j = 0; j < memslot->npages; j++) {
			if (memslot->rmap[j])
				put_page((struct page *)memslot->rmap[j]);
		}
	}
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
#ifdef  KVM_CAP_DEVICE_ASSIGNMENT
	kvm_free_all_assigned_devices(kvm);
#endif
	kfree(kvm->arch.vioapic);
	kvm_release_vm_pages(kvm);
	kvm_free_physmem(kvm);
	free_kvm(kvm);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	if (cpu != vcpu->cpu) {
		vcpu->cpu = cpu;
		if (vcpu->arch.ht_active)
			kvm_migrate_hlt_timer(vcpu);
	}
}
#define SAVE_REGS(_x)	regs->_x = vcpu->arch._x
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		regs->vpd.vgr[i] = vpd->vgr[i];
		regs->vpd.vbgr[i] = vpd->vbgr[i];
	}
	for (i = 0; i < 128; i++)
		regs->vpd.vcr[i] = vpd->vcr[i];
	regs->vpd.vhpi = vpd->vhpi;
	regs->vpd.vnat = vpd->vnat;
	regs->vpd.vbnat = vpd->vbnat;
	regs->vpd.vpsr = vpd->vpsr;
	regs->vpd.vpr = vpd->vpr;

	memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));

	SAVE_REGS(mp_state);
	SAVE_REGS(vmm_rr);
	memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
	memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
	SAVE_REGS(itr_regions);
	SAVE_REGS(dtr_regions);
	SAVE_REGS(tc_regions);
	SAVE_REGS(irq_check);
	SAVE_REGS(itc_check);
	SAVE_REGS(timer_check);
	SAVE_REGS(timer_pending);
	SAVE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		regs->vrr[i] = vcpu->arch.vrr[i];
		regs->ibr[i] = vcpu->arch.ibr[i];
		regs->dbr[i] = vcpu->arch.dbr[i];
	}
	for (i = 0; i < 4; i++)
		regs->insvc[i] = vcpu->arch.insvc[i];
	regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
	SAVE_REGS(xtp);
	SAVE_REGS(metaphysical_rr0);
	SAVE_REGS(metaphysical_rr4);
	SAVE_REGS(metaphysical_saved_rr0);
	SAVE_REGS(metaphysical_saved_rr4);
	SAVE_REGS(fp_psr);
	SAVE_REGS(saved_gp);

	vcpu_put(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
				  struct kvm_ia64_vcpu_stack *stack)
{
	memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
				  struct kvm_ia64_vcpu_stack *stack)
{
	memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
	       sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));

	vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
	return 0;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.hlt_timer);
	kfree(vcpu->arch.apic);
}
long kvm_arch_vcpu_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_ia64_vcpu_stack *stack = NULL;
	long r;

	switch (ioctl) {
	case KVM_IA64_VCPU_GET_STACK: {
		struct kvm_ia64_vcpu_stack __user *user_stack;
		void __user *first_p = argp;

		r = -EFAULT;
		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
			goto out;

		if (!access_ok(VERIFY_WRITE, user_stack,
			       sizeof(struct kvm_ia64_vcpu_stack))) {
			printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
			       "Illegal user destination address for stack\n");
			goto out;
		}
		stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
		if (!stack) {
			r = -ENOMEM;
			goto out;
		}

		r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
		if (r)
			goto out;

		if (copy_to_user(user_stack, stack,
				 sizeof(struct kvm_ia64_vcpu_stack))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_IA64_VCPU_SET_STACK: {
		struct kvm_ia64_vcpu_stack __user *user_stack;
		void __user *first_p = argp;

		r = -EFAULT;
		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
			goto out;

		if (!access_ok(VERIFY_READ, user_stack,
			       sizeof(struct kvm_ia64_vcpu_stack))) {
			printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
			       "Illegal user address for stack\n");
			goto out;
		}
		stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
		if (!stack) {
			r = -ENOMEM;
			goto out;
		}
		if (copy_from_user(stack, user_stack,
				   sizeof(struct kvm_ia64_vcpu_stack)))
			goto out;

		r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
		break;
	}

	default:
		r = -EINVAL;
	}

out:
	kfree(stack);
	return r;
}
int kvm_arch_set_memory_region(struct kvm *kvm,
		struct kvm_userspace_memory_region *mem,
		struct kvm_memory_slot old,
		int user_alloc)
{
	unsigned long i;
	unsigned long pfn;
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
	unsigned long base_gfn = memslot->base_gfn;

	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pfn = gfn_to_pfn(kvm, base_gfn + i);
		if (!kvm_is_mmio_pfn(pfn)) {
			kvm_set_pmt_entry(kvm, base_gfn + i,
					pfn << PAGE_SHIFT,
					_PAGE_AR_RWX | _PAGE_MA_WB);
			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
		} else {
			kvm_set_pmt_entry(kvm, base_gfn + i,
					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
					_PAGE_MA_UC);
			memslot->rmap[i] = 0;
		}
	}

	return 0;
}
void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_flush_remote_tlbs(kvm);
}
long kvm_arch_dev_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_uninit(vcpu);
}
static int vti_cpu_has_kvm_support(void)
{
	long avail = 1, status = 1, control = 1;
	long ret;

	ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
	if (ret)
		goto out;
	if (!(avail & PAL_PROC_VM_BIT))
		goto out;

	printk(KERN_DEBUG"kvm: Hardware Supports VT\n");

	ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
	if (ret)
		goto out;
	printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);

	if (!(vp_env_info & VP_OPCODE)) {
		printk(KERN_WARNING"kvm: No opcode ability on hardware, "
				"vm_env_info:0x%lx\n", vp_env_info);
	}

	return 1;
out:
	return 0;
}
/*
 * On SN2, the ITC isn't stable, so copy in fast path code to use the
 * SN2 RTC, replacing the ITC based default version.
 */
static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
			  struct module *module)
{
	unsigned long new_ar, new_ar_sn2;
	unsigned long module_base;

	if (!ia64_platform_is("sn2"))
		return;

	module_base = (unsigned long)module->module_core;

	new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
	new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;

	printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
	       "as source\n");

	/*
	 * Copy the SN2 version of mov_ar into place. They are both
	 * the same size, so 6 bundles is sufficient (6 * 0x10).
	 */
	memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
}
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
			    struct module *module)
{
	unsigned long module_base;
	unsigned long vmm_size;

	unsigned long vmm_offset, func_offset, fdesc_offset;
	struct fdesc *p_fdesc;

	BUG_ON(!module);

	if (!kvm_vmm_base) {
		printk("kvm: kvm area hasn't been initialized yet!!\n");
		return -EFAULT;
	}

	/*Calculate new position of relocated vmm module.*/
	module_base = (unsigned long)module->module_core;
	vmm_size = module->core_size;
	if (unlikely(vmm_size > KVM_VMM_SIZE))
		return -EFAULT;

	memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
	kvm_patch_vmm(vmm_info, module);
	kvm_flush_icache(kvm_vmm_base, vmm_size);

	/*Recalculate kvm_vmm_info based on new VMM*/
	vmm_offset = vmm_info->vmm_ivt - module_base;
	kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
	printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
			kvm_vmm_info->vmm_ivt);

	fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
	kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
							fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
			KVM_VMM_BASE + func_offset);

	fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
	kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
			fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	kvm_vmm_gp = p_fdesc->gp;

	printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
			kvm_vmm_info->vmm_entry);
	printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
			KVM_VMM_BASE + func_offset);

	return 0;
}
int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;

	if (!vti_cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	if (kvm_vmm_info) {
		printk(KERN_ERR "kvm: Already loaded VMM module!\n");
		r = -EEXIST;
		goto out;
	}

	r = -ENOMEM;
	kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
	if (!kvm_vmm_info)
		goto out;

	if (kvm_alloc_vmm_area())
		goto out_free0;

	r = kvm_relocate_vmm(vmm_info, vmm_info->module);
	if (r)
		goto out_free1;

	return 0;

out_free1:
	kvm_free_vmm_area();
out_free0:
	kfree(kvm_vmm_info);
out:
	return r;
}
void kvm_arch_exit(void)
{
	kvm_free_vmm_area();
	kfree(kvm_vmm_info);
	kvm_vmm_info = NULL;
}
static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	long base;
	unsigned long n;
	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	base = memslot->base_gfn / BITS_PER_LONG;

	for (i = 0; i < n/sizeof(long); ++i) {
		memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
		dirty_bitmap[base + i] = 0;
	}
	r = 0;
out:
	return r;
}
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	spin_lock(&kvm->arch.dirty_log_lock);

	r = kvm_ia64_sync_dirty_log(kvm, log);
	if (r)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	spin_unlock(&kvm->arch.dirty_log_lock);
	return r;
}
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);

	me = get_cpu();
	if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
		if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
			smp_send_reschedule(cpu);
	put_cpu();
}
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
{
	return __apic_accept_irq(vcpu, irq->vector);
}
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
	return apic->vcpu->vcpu_id == dest;
}
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
	return 0;
}
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.xtp - vcpu2->arch.xtp;
}
int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
		int short_hand, int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	return (dest_mode == 0) ?
		kvm_apic_match_physical_addr(target, dest) :
		kvm_apic_match_logical_addr(target, dest);
}
static int find_highest_bits(int *dat)
{
	u32 bits, bitnum;
	int i;

	/* loop for all 256 bits */
	for (i = 7; i >= 0 ; i--) {
		bits = dat[i];
		if (bits) {
			bitnum = fls(bits);
			return i * 32 + bitnum - 1;
		}
	}

	return -1;
}
int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (vpd->irr[0] & (1UL << NMI_VECTOR))
		return NMI_VECTOR;
	if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
		return ExtINT_VECTOR;

	return find_highest_bits((int *)&vpd->irr[0]);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.timer_fired;
}
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
		(kvm_highest_pending_irq(vcpu) != -1);
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}
static int vcpu_reset(struct kvm_vcpu *vcpu)
{
	int r;
	long psr;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	local_irq_restore(psr);
	if (r)
		goto fail;

	vcpu->arch.launched = 0;
	kvm_arch_vcpu_uninit(vcpu);
	r = kvm_arch_vcpu_init(vcpu);
	if (r)
		goto fail;

	kvm_purge_vmm_mapping(vcpu);
	r = 0;
fail:
	return r;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	int r = 0;

	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
		r = vcpu_reset(vcpu);
	vcpu_put(vcpu);
	return r;
}