/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
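
/*
 * KVM on PPC comes in two flavours: HV, which uses the hardware
 * hypervisor facilities of book3s_64, and PR, which runs the guest in
 * problem state. Each backend registers its ops pointer below when its
 * module loads; kvm_arch_init_vm() later picks one of the two per VM.
 */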
struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		kvm_guest_enter();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif
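
/*
 * The shared (magic) page is a raw struct that the guest maps and reads
 * directly, so it carries whatever endianness the guest expects. When a
 * Book3S guest switches endianness (detected via MSR_LE in kvmppc_kvm_pv()
 * below), every field has to be byteswapped once to match the new view.
 */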
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
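
/*
 * Register convention decoded by kvmppc_kvm_pv() above: the hypercall
 * token arrives in r11 and up to four arguments in r3-r6; the status is
 * handed back in r3 (set from the return value by the subarch exit
 * handler) and the second return value in r4 (r2 above). The instruction
 * sequence a guest uses to get here is published through
 * kvm_vm_ioctl_get_pvinfo() below.
 */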
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
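
/*
 * Note the mixed error convention in kvmppc_st()/kvmppc_ld() above:
 * translation and permission failures return a negative errno, while a
 * failed kvm_write_guest()/kvm_read_guest() returns EMULATE_DO_MMIO so
 * the caller can fall back to full MMIO emulation for that address.
 */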
int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}
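
/*
 * Userspace probes these capabilities with the standard
 * KVM_CHECK_EXTENSION ioctl, e.g. (illustrative sketch, error handling
 * omitted):
 *
 *	int smt = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 */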
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;

	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}
/*
 * Shared MMIO-load helper. As originally written, kvmppc_handle_load()
 * unconditionally reset mmio_sign_extend after kvmppc_handle_loads() had
 * already set it, losing the sign-extension request; passing the flag in
 * as a parameter avoids that.
 */
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}
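
/*
 * MMIO round trip, for orientation: the load handlers above and
 * kvmppc_handle_store() below fill in run->mmio and return
 * EMULATE_DO_MMIO when no in-kernel device claims the access; KVM then
 * exits to userspace with KVM_EXIT_MMIO. Once userspace has serviced the
 * access and calls KVM_RUN again, kvm_arch_vcpu_ioctl_run() sees
 * mmio_needed and, for loads, completes the instruction through
 * kvmppc_complete_mmio_load().
 */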
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
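
/*
 * Userspace (QEMU, for instance) retrieves this blob with the
 * KVM_PPC_GET_PVINFO VM ioctl and typically advertises it to guests via
 * the device tree, telling them which instruction sequence reaches the
 * hypervisor.
 */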
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}

out:
	return r;
}
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
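
/*
 * A minimal sketch of how a subarch uses the LPID allocator above
 * (assuming kvmppc_init_lpid() ran at init time to bound the ID space):
 *
 *	long lpid = kvmppc_alloc_lpid();
 *	if (lpid < 0)
 *		return lpid;		// -ENOMEM when the bitmap is full
 *	...
 *	kvmppc_free_lpid(lpid);
 */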
int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);