arch/powerpc/kvm/powerpc.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
4 * Copyright IBM Corp. 2007
6 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
7 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
8 */
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/kvm_host.h>
13 #include <linux/vmalloc.h>
14 #include <linux/hrtimer.h>
15 #include <linux/sched/signal.h>
16 #include <linux/fs.h>
17 #include <linux/slab.h>
18 #include <linux/file.h>
19 #include <linux/module.h>
20 #include <linux/irqbypass.h>
21 #include <linux/kvm_irqfd.h>
22 #include <asm/cputable.h>
23 #include <linux/uaccess.h>
24 #include <asm/kvm_ppc.h>
25 #include <asm/cputhreads.h>
26 #include <asm/irqflags.h>
27 #include <asm/iommu.h>
28 #include <asm/switch_to.h>
29 #include <asm/xive.h>
30 #ifdef CONFIG_PPC_PSERIES
31 #include <asm/hvcall.h>
32 #include <asm/plpar_wrappers.h>
33 #endif
34 #include <asm/ultravisor.h>
35 #include <asm/kvm_host.h>
37 #include "timing.h"
38 #include "irq.h"
39 #include "../mm/mmu_decl.h"
41 #define CREATE_TRACE_POINTS
42 #include "trace.h"
44 struct kvmppc_ops *kvmppc_hv_ops;
45 EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
46 struct kvmppc_ops *kvmppc_pr_ops;
47 EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
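/*
 * kvmppc_hv_ops and kvmppc_pr_ops above are filled in when the HV and PR
 * backend implementations register themselves; a NULL pointer means that
 * backend is not available, which kvm_arch_init_vm() below relies on when
 * picking a default VM type.
 */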
50 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
52 return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
55 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
57 return kvm_arch_vcpu_runnable(vcpu);
60 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
62 return false;
65 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
67 return 1;
71 /* Common checks before entering the guest world. Call with interrupts
72  * disabled.
74  * returns:
76  * == 1 if we're ready to go into guest state
77  * <= 0 if we need to go back to the host with return value */
79 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
81 int r;
83 WARN_ON(irqs_disabled());
84 hard_irq_disable();
86 while (true) {
87 if (need_resched()) {
88 local_irq_enable();
89 cond_resched();
90 hard_irq_disable();
91 continue;
94 if (signal_pending(current)) {
95 kvmppc_account_exit(vcpu, SIGNAL_EXITS);
96 vcpu->run->exit_reason = KVM_EXIT_INTR;
97 r = -EINTR;
98 break;
101 vcpu->mode = IN_GUEST_MODE;
104 /* Reading vcpu->requests must happen after setting vcpu->mode,
105  * so we don't miss a request because the requester sees
106  * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
107  * before next entering the guest (and thus doesn't IPI).
108  * This also orders the write to mode from any reads
109  * to the page tables done while the VCPU is running.
110  * Please see the comment in kvm_flush_remote_tlbs. */
112 smp_mb();
114 if (kvm_request_pending(vcpu)) {
115 /* Make sure we process requests with preemption enabled */
116 local_irq_enable();
117 trace_kvm_check_requests(vcpu);
118 r = kvmppc_core_check_requests(vcpu);
119 hard_irq_disable();
120 if (r > 0)
121 continue;
122 break;
125 if (kvmppc_core_prepare_to_enter(vcpu)) {
126 /* interrupts got enabled in between, so we
127 are back at square 1 */
128 continue;
131 guest_enter_irqoff();
132 return 1;
135 /* return to host */
136 local_irq_enable();
137 return r;
139 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
141 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
142 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
144 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
145 int i;
147 shared->sprg0 = swab64(shared->sprg0);
148 shared->sprg1 = swab64(shared->sprg1);
149 shared->sprg2 = swab64(shared->sprg2);
150 shared->sprg3 = swab64(shared->sprg3);
151 shared->srr0 = swab64(shared->srr0);
152 shared->srr1 = swab64(shared->srr1);
153 shared->dar = swab64(shared->dar);
154 shared->msr = swab64(shared->msr);
155 shared->dsisr = swab32(shared->dsisr);
156 shared->int_pending = swab32(shared->int_pending);
157 for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
158 shared->sr[i] = swab32(shared->sr[i]);
160 #endif
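/*
 * Handle a KVM/ePAPR paravirtual hypercall from the guest: the hypercall
 * number arrives in r11 and up to four arguments in r3-r6.  The second
 * return value is written to r4 here; the primary return code is handed
 * back to the caller, which places it in r3.
 */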
162 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
164 int nr = kvmppc_get_gpr(vcpu, 11);
165 int r;
166 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
167 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
168 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
169 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
170 unsigned long r2 = 0;
172 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
173 /* 32 bit mode */
174 param1 &= 0xffffffff;
175 param2 &= 0xffffffff;
176 param3 &= 0xffffffff;
177 param4 &= 0xffffffff;
180 switch (nr) {
181 case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
183 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
184 /* Book3S can be little endian, find it out here */
185 int shared_big_endian = true;
186 if (vcpu->arch.intr_msr & MSR_LE)
187 shared_big_endian = false;
188 if (shared_big_endian != vcpu->arch.shared_big_endian)
189 kvmppc_swab_shared(vcpu);
190 vcpu->arch.shared_big_endian = shared_big_endian;
191 #endif
193 if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
195 /* Older versions of the Linux magic page code had
196  * a bug where they would map their trampoline code
197  * NX. If that's the case, remove !PR NX capability. */
199 vcpu->arch.disable_kernel_nx = true;
200 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
203 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
204 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
206 #ifdef CONFIG_PPC_64K_PAGES
208 /* Make sure our 4k magic page is in the same window of a 64k
209  * page within the guest and within the host's page. */
211 if ((vcpu->arch.magic_page_pa & 0xf000) !=
212 ((ulong)vcpu->arch.shared & 0xf000)) {
213 void *old_shared = vcpu->arch.shared;
214 ulong shared = (ulong)vcpu->arch.shared;
215 void *new_shared;
217 shared &= PAGE_MASK;
218 shared |= vcpu->arch.magic_page_pa & 0xf000;
219 new_shared = (void*)shared;
220 memcpy(new_shared, old_shared, 0x1000);
221 vcpu->arch.shared = new_shared;
223 #endif
225 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
227 r = EV_SUCCESS;
228 break;
230 case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
231 r = EV_SUCCESS;
232 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
233 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
234 #endif
236 /* Second return value is in r4 */
237 break;
238 case EV_HCALL_TOKEN(EV_IDLE):
239 r = EV_SUCCESS;
240 kvm_vcpu_block(vcpu);
241 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
242 break;
243 default:
244 r = EV_UNIMPLEMENTED;
245 break;
248 kvmppc_set_gpr(vcpu, 4, r2);
250 return r;
252 EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
254 int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
256 int r = false;
258 /* We have to know what CPU to virtualize */
259 if (!vcpu->arch.pvr)
260 goto out;
262 /* PAPR only works with book3s_64 */
263 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
264 goto out;
266 /* HV KVM can only do PAPR mode for now */
267 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
268 goto out;
270 #ifdef CONFIG_KVM_BOOKE_HV
271 if (!cpu_has_feature(CPU_FTR_EMB_HV))
272 goto out;
273 #endif
275 r = true;
277 out:
278 vcpu->arch.sane = r;
279 return r ? 0 : -EINVAL;
281 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
283 int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
285 enum emulation_result er;
286 int r;
288 er = kvmppc_emulate_loadstore(vcpu);
289 switch (er) {
290 case EMULATE_DONE:
291 /* Future optimization: only reload non-volatiles if they were
292 * actually modified. */
293 r = RESUME_GUEST_NV;
294 break;
295 case EMULATE_AGAIN:
296 r = RESUME_GUEST;
297 break;
298 case EMULATE_DO_MMIO:
299 run->exit_reason = KVM_EXIT_MMIO;
300 /* We must reload nonvolatiles because "update" load/store
301 * instructions modify register state. */
302 /* Future optimization: only reload non-volatiles if they were
303 * actually modified. */
304 r = RESUME_HOST_NV;
305 break;
306 case EMULATE_FAIL:
308 u32 last_inst;
310 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
311 /* XXX Deliver Program interrupt to guest. */
312 pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
313 r = RESUME_HOST;
314 break;
316 default:
317 WARN_ON(1);
318 r = RESUME_GUEST;
321 return r;
323 EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
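/*
 * kvmppc_st()/kvmppc_ld() below copy data to/from a guest effective
 * address: the address is translated via the backend's
 * store_to_eaddr/load_from_eaddr hook when one is provided, otherwise
 * through kvmppc_xlate(), with a special case for the shared magic page.
 */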
325 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
326 bool data)
328 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
329 struct kvmppc_pte pte;
330 int r = -EINVAL;
332 vcpu->stat.st++;
334 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
335 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
336 size);
338 if ((!r) || (r == -EAGAIN))
339 return r;
341 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
342 XLATE_WRITE, &pte);
343 if (r < 0)
344 return r;
346 *eaddr = pte.raddr;
348 if (!pte.may_write)
349 return -EPERM;
351 /* Magic page override */
352 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
353 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
354 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
355 void *magic = vcpu->arch.shared;
356 magic += pte.eaddr & 0xfff;
357 memcpy(magic, ptr, size);
358 return EMULATE_DONE;
361 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
362 return EMULATE_DO_MMIO;
364 return EMULATE_DONE;
366 EXPORT_SYMBOL_GPL(kvmppc_st);
368 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
369 bool data)
371 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
372 struct kvmppc_pte pte;
373 int rc = -EINVAL;
375 vcpu->stat.ld++;
377 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
378 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
379 size);
381 if ((!rc) || (rc == -EAGAIN))
382 return rc;
384 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
385 XLATE_READ, &pte);
386 if (rc)
387 return rc;
389 *eaddr = pte.raddr;
391 if (!pte.may_read)
392 return -EPERM;
394 if (!data && !pte.may_execute)
395 return -ENOEXEC;
397 /* Magic page override */
398 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
399 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
400 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
401 void *magic = vcpu->arch.shared;
402 magic += pte.eaddr & 0xfff;
403 memcpy(ptr, magic, size);
404 return EMULATE_DONE;
407 if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
408 return EMULATE_DO_MMIO;
410 return EMULATE_DONE;
412 EXPORT_SYMBOL_GPL(kvmppc_ld);
414 int kvm_arch_hardware_enable(void)
416 return 0;
419 int kvm_arch_hardware_setup(void)
421 return 0;
424 int kvm_arch_check_processor_compat(void)
426 return kvmppc_core_check_processor_compat();
429 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
431 struct kvmppc_ops *kvm_ops = NULL;
433 /* if we have both HV and PR enabled, default is HV */
435 if (type == 0) {
436 if (kvmppc_hv_ops)
437 kvm_ops = kvmppc_hv_ops;
438 else
439 kvm_ops = kvmppc_pr_ops;
440 if (!kvm_ops)
441 goto err_out;
442 } else if (type == KVM_VM_PPC_HV) {
443 if (!kvmppc_hv_ops)
444 goto err_out;
445 kvm_ops = kvmppc_hv_ops;
446 } else if (type == KVM_VM_PPC_PR) {
447 if (!kvmppc_pr_ops)
448 goto err_out;
449 kvm_ops = kvmppc_pr_ops;
450 } else
451 goto err_out;
453 if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
454 return -ENOENT;
456 kvm->arch.kvm_ops = kvm_ops;
457 return kvmppc_core_init_vm(kvm);
458 err_out:
459 return -EINVAL;
462 void kvm_arch_destroy_vm(struct kvm *kvm)
464 unsigned int i;
465 struct kvm_vcpu *vcpu;
467 #ifdef CONFIG_KVM_XICS
469 /* We call kick_all_cpus_sync() to ensure that all
470  * CPUs have executed any pending IPIs before we
471  * continue and free VCPUs structures below. */
473 if (is_kvmppc_hv_enabled(kvm))
474 kick_all_cpus_sync();
475 #endif
477 kvm_for_each_vcpu(i, vcpu, kvm)
478 kvm_vcpu_destroy(vcpu);
480 mutex_lock(&kvm->lock);
481 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
482 kvm->vcpus[i] = NULL;
484 atomic_set(&kvm->online_vcpus, 0);
486 kvmppc_core_destroy_vm(kvm);
488 mutex_unlock(&kvm->lock);
490 /* drop the module reference */
491 module_put(kvm->arch.kvm_ops->owner);
494 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
496 int r;
497 /* Assume we're using HV mode when the HV module is loaded */
498 int hv_enabled = kvmppc_hv_ops ? 1 : 0;
500 if (kvm) {
502 /* Hooray - we know which VM type we're running on. Depend on
503  * that rather than the guess above. */
505 hv_enabled = is_kvmppc_hv_enabled(kvm);
508 switch (ext) {
509 #ifdef CONFIG_BOOKE
510 case KVM_CAP_PPC_BOOKE_SREGS:
511 case KVM_CAP_PPC_BOOKE_WATCHDOG:
512 case KVM_CAP_PPC_EPR:
513 #else
514 case KVM_CAP_PPC_SEGSTATE:
515 case KVM_CAP_PPC_HIOR:
516 case KVM_CAP_PPC_PAPR:
517 #endif
518 case KVM_CAP_PPC_UNSET_IRQ:
519 case KVM_CAP_PPC_IRQ_LEVEL:
520 case KVM_CAP_ENABLE_CAP:
521 case KVM_CAP_ONE_REG:
522 case KVM_CAP_IOEVENTFD:
523 case KVM_CAP_DEVICE_CTRL:
524 case KVM_CAP_IMMEDIATE_EXIT:
525 r = 1;
526 break;
527 case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
528 /* fall through */
529 case KVM_CAP_PPC_PAIRED_SINGLES:
530 case KVM_CAP_PPC_OSI:
531 case KVM_CAP_PPC_GET_PVINFO:
532 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
533 case KVM_CAP_SW_TLB:
534 #endif
535 /* We support this only for PR */
536 r = !hv_enabled;
537 break;
538 #ifdef CONFIG_KVM_MPIC
539 case KVM_CAP_IRQ_MPIC:
540 r = 1;
541 break;
542 #endif
544 #ifdef CONFIG_PPC_BOOK3S_64
545 case KVM_CAP_SPAPR_TCE:
546 case KVM_CAP_SPAPR_TCE_64:
547 r = 1;
548 break;
549 case KVM_CAP_SPAPR_TCE_VFIO:
550 r = !!cpu_has_feature(CPU_FTR_HVMODE);
551 break;
552 case KVM_CAP_PPC_RTAS:
553 case KVM_CAP_PPC_FIXUP_HCALL:
554 case KVM_CAP_PPC_ENABLE_HCALL:
555 #ifdef CONFIG_KVM_XICS
556 case KVM_CAP_IRQ_XICS:
557 #endif
558 case KVM_CAP_PPC_GET_CPU_CHAR:
559 r = 1;
560 break;
561 #ifdef CONFIG_KVM_XIVE
562 case KVM_CAP_PPC_IRQ_XIVE:
564 /* We need XIVE to be enabled on the platform (implies
565  * a POWER9 processor) and the PowerNV platform, as
566  * nested is not yet supported. */
568 r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
569 kvmppc_xive_native_supported();
570 break;
571 #endif
573 case KVM_CAP_PPC_ALLOC_HTAB:
574 r = hv_enabled;
575 break;
576 #endif /* CONFIG_PPC_BOOK3S_64 */
577 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
578 case KVM_CAP_PPC_SMT:
579 r = 0;
580 if (kvm) {
581 if (kvm->arch.emul_smt_mode > 1)
582 r = kvm->arch.emul_smt_mode;
583 else
584 r = kvm->arch.smt_mode;
585 } else if (hv_enabled) {
586 if (cpu_has_feature(CPU_FTR_ARCH_300))
587 r = 1;
588 else
589 r = threads_per_subcore;
591 break;
592 case KVM_CAP_PPC_SMT_POSSIBLE:
593 r = 1;
594 if (hv_enabled) {
595 if (!cpu_has_feature(CPU_FTR_ARCH_300))
596 r = ((threads_per_subcore << 1) - 1);
597 else
598 /* P9 can emulate dbells, so allow any mode */
599 r = 8 | 4 | 2 | 1;
601 break;
602 case KVM_CAP_PPC_RMA:
603 r = 0;
604 break;
605 case KVM_CAP_PPC_HWRNG:
606 r = kvmppc_hwrng_present();
607 break;
608 case KVM_CAP_PPC_MMU_RADIX:
609 r = !!(hv_enabled && radix_enabled());
610 break;
611 case KVM_CAP_PPC_MMU_HASH_V3:
612 r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
613 cpu_has_feature(CPU_FTR_HVMODE));
614 break;
615 case KVM_CAP_PPC_NESTED_HV:
616 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
617 !kvmppc_hv_ops->enable_nested(NULL));
618 break;
619 #endif
620 case KVM_CAP_SYNC_MMU:
621 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
622 r = hv_enabled;
623 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
624 r = 1;
625 #else
626 r = 0;
627 #endif
628 break;
629 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
630 case KVM_CAP_PPC_HTAB_FD:
631 r = hv_enabled;
632 break;
633 #endif
634 case KVM_CAP_NR_VCPUS:
636 /* Recommending a number of CPUs is somewhat arbitrary; we
637  * return the number of present CPUs for -HV (since a host
638  * will have secondary threads "offline"), and for other KVM
639  * implementations just count online CPUs. */
641 if (hv_enabled)
642 r = num_present_cpus();
643 else
644 r = num_online_cpus();
645 break;
646 case KVM_CAP_MAX_VCPUS:
647 r = KVM_MAX_VCPUS;
648 break;
649 case KVM_CAP_MAX_VCPU_ID:
650 r = KVM_MAX_VCPU_ID;
651 break;
652 #ifdef CONFIG_PPC_BOOK3S_64
653 case KVM_CAP_PPC_GET_SMMU_INFO:
654 r = 1;
655 break;
656 case KVM_CAP_SPAPR_MULTITCE:
657 r = 1;
658 break;
659 case KVM_CAP_SPAPR_RESIZE_HPT:
660 r = !!hv_enabled;
661 break;
662 #endif
663 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
664 case KVM_CAP_PPC_FWNMI:
665 r = hv_enabled;
666 break;
667 #endif
668 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
669 case KVM_CAP_PPC_HTM:
670 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
671 (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
672 break;
673 #endif
674 default:
675 r = 0;
676 break;
678 return r;
682 long kvm_arch_dev_ioctl(struct file *filp,
683 unsigned int ioctl, unsigned long arg)
685 return -EINVAL;
688 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
689 struct kvm_memory_slot *dont)
691 kvmppc_core_free_memslot(kvm, free, dont);
694 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
695 unsigned long npages)
697 return kvmppc_core_create_memslot(kvm, slot, npages);
700 int kvm_arch_prepare_memory_region(struct kvm *kvm,
701 struct kvm_memory_slot *memslot,
702 const struct kvm_userspace_memory_region *mem,
703 enum kvm_mr_change change)
705 return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
708 void kvm_arch_commit_memory_region(struct kvm *kvm,
709 const struct kvm_userspace_memory_region *mem,
710 const struct kvm_memory_slot *old,
711 const struct kvm_memory_slot *new,
712 enum kvm_mr_change change)
714 kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
717 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
718 struct kvm_memory_slot *slot)
720 kvmppc_core_flush_memslot(kvm, slot);
723 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
725 return 0;
728 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
730 struct kvm_vcpu *vcpu;
732 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
733 kvmppc_decrementer_func(vcpu);
735 return HRTIMER_NORESTART;
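/*
 * Architecture-level vcpu creation: set up the emulated decrementer
 * timer, then let the active backend finish the job through
 * kvmppc_core_vcpu_create().
 */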
738 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
740 int err;
742 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
743 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
744 vcpu->arch.dec_expires = get_tb();
746 #ifdef CONFIG_KVM_EXIT_TIMING
747 mutex_init(&vcpu->arch.exit_timing_lock);
748 #endif
749 err = kvmppc_subarch_vcpu_init(vcpu);
750 if (err)
751 return err;
753 err = kvmppc_core_vcpu_create(vcpu);
754 if (err)
755 goto out_vcpu_uninit;
757 vcpu->arch.wqp = &vcpu->wq;
758 kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
759 return 0;
761 out_vcpu_uninit:
762 kvmppc_mmu_destroy(vcpu);
763 kvmppc_subarch_vcpu_uninit(vcpu);
764 return err;
767 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
771 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
773 /* Make sure we're not using the vcpu anymore */
774 hrtimer_cancel(&vcpu->arch.dec_timer);
776 kvmppc_remove_vcpu_debugfs(vcpu);
778 switch (vcpu->arch.irq_type) {
779 case KVMPPC_IRQ_MPIC:
780 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
781 break;
782 case KVMPPC_IRQ_XICS:
783 if (xics_on_xive())
784 kvmppc_xive_cleanup_vcpu(vcpu);
785 else
786 kvmppc_xics_free_icp(vcpu);
787 break;
788 case KVMPPC_IRQ_XIVE:
789 kvmppc_xive_native_cleanup_vcpu(vcpu);
790 break;
793 kvmppc_core_vcpu_free(vcpu);
795 kvmppc_mmu_destroy(vcpu);
796 kvmppc_subarch_vcpu_uninit(vcpu);
799 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
801 return kvmppc_core_pending_dec(vcpu);
804 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
806 #ifdef CONFIG_BOOKE
808 /* vrsave (formerly usprg0) isn't used by Linux, but may
809  * be used by the guest.
811  * On non-booke this is associated with Altivec and
812  * is handled by code in book3s.c. */
814 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
815 #endif
816 kvmppc_core_vcpu_load(vcpu, cpu);
819 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
821 kvmppc_core_vcpu_put(vcpu);
822 #ifdef CONFIG_BOOKE
823 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
824 #endif
828 /* irq_bypass_add_producer and irq_bypass_del_producer are only
829  * useful if the architecture supports PCI passthrough.
830  * irq_bypass_stop and irq_bypass_start are not needed and so
831  * kvm_ops are not defined for them. */
833 bool kvm_arch_has_irq_bypass(void)
835 return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
836 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
839 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
840 struct irq_bypass_producer *prod)
842 struct kvm_kernel_irqfd *irqfd =
843 container_of(cons, struct kvm_kernel_irqfd, consumer);
844 struct kvm *kvm = irqfd->kvm;
846 if (kvm->arch.kvm_ops->irq_bypass_add_producer)
847 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
849 return 0;
852 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
853 struct irq_bypass_producer *prod)
855 struct kvm_kernel_irqfd *irqfd =
856 container_of(cons, struct kvm_kernel_irqfd, consumer);
857 struct kvm *kvm = irqfd->kvm;
859 if (kvm->arch.kvm_ops->irq_bypass_del_producer)
860 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
863 #ifdef CONFIG_VSX
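/*
 * The helpers below map a VSX element index, as numbered by the guest
 * instruction, onto its position in the host-side register image; on
 * little-endian hosts the index is mirrored because element 0 lives at
 * the opposite end of the vector.
 */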
864 static inline int kvmppc_get_vsr_dword_offset(int index)
866 int offset;
868 if ((index != 0) && (index != 1))
869 return -1;
871 #ifdef __BIG_ENDIAN
872 offset = index;
873 #else
874 offset = 1 - index;
875 #endif
877 return offset;
880 static inline int kvmppc_get_vsr_word_offset(int index)
882 int offset;
884 if ((index > 3) || (index < 0))
885 return -1;
887 #ifdef __BIG_ENDIAN
888 offset = index;
889 #else
890 offset = 3 - index;
891 #endif
892 return offset;
895 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
896 u64 gpr)
898 union kvmppc_one_reg val;
899 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
900 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
902 if (offset == -1)
903 return;
905 if (index >= 32) {
906 val.vval = VCPU_VSX_VR(vcpu, index - 32);
907 val.vsxval[offset] = gpr;
908 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
909 } else {
910 VCPU_VSX_FPR(vcpu, index, offset) = gpr;
914 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
915 u64 gpr)
917 union kvmppc_one_reg val;
918 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
920 if (index >= 32) {
921 val.vval = VCPU_VSX_VR(vcpu, index - 32);
922 val.vsxval[0] = gpr;
923 val.vsxval[1] = gpr;
924 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
925 } else {
926 VCPU_VSX_FPR(vcpu, index, 0) = gpr;
927 VCPU_VSX_FPR(vcpu, index, 1) = gpr;
931 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
932 u32 gpr)
934 union kvmppc_one_reg val;
935 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
937 if (index >= 32) {
938 val.vsx32val[0] = gpr;
939 val.vsx32val[1] = gpr;
940 val.vsx32val[2] = gpr;
941 val.vsx32val[3] = gpr;
942 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
943 } else {
944 val.vsx32val[0] = gpr;
945 val.vsx32val[1] = gpr;
946 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
947 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
951 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
952 u32 gpr32)
954 union kvmppc_one_reg val;
955 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
956 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
957 int dword_offset, word_offset;
959 if (offset == -1)
960 return;
962 if (index >= 32) {
963 val.vval = VCPU_VSX_VR(vcpu, index - 32);
964 val.vsx32val[offset] = gpr32;
965 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
966 } else {
967 dword_offset = offset / 2;
968 word_offset = offset % 2;
969 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
970 val.vsx32val[word_offset] = gpr32;
971 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
974 #endif /* CONFIG_VSX */
976 #ifdef CONFIG_ALTIVEC
977 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
978 int index, int element_size)
980 int offset;
981 int elts = sizeof(vector128)/element_size;
983 if ((index < 0) || (index >= elts))
984 return -1;
986 if (kvmppc_need_byteswap(vcpu))
987 offset = elts - index - 1;
988 else
989 offset = index;
991 return offset;
994 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
995 int index)
997 return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
1000 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
1001 int index)
1003 return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
1006 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
1007 int index)
1009 return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
1012 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
1013 int index)
1015 return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
1019 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
1020 u64 gpr)
1022 union kvmppc_one_reg val;
1023 int offset = kvmppc_get_vmx_dword_offset(vcpu,
1024 vcpu->arch.mmio_vmx_offset);
1025 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1027 if (offset == -1)
1028 return;
1030 val.vval = VCPU_VSX_VR(vcpu, index);
1031 val.vsxval[offset] = gpr;
1032 VCPU_VSX_VR(vcpu, index) = val.vval;
1035 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1036 u32 gpr32)
1038 union kvmppc_one_reg val;
1039 int offset = kvmppc_get_vmx_word_offset(vcpu,
1040 vcpu->arch.mmio_vmx_offset);
1041 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1043 if (offset == -1)
1044 return;
1046 val.vval = VCPU_VSX_VR(vcpu, index);
1047 val.vsx32val[offset] = gpr32;
1048 VCPU_VSX_VR(vcpu, index) = val.vval;
1051 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1052 u16 gpr16)
1054 union kvmppc_one_reg val;
1055 int offset = kvmppc_get_vmx_hword_offset(vcpu,
1056 vcpu->arch.mmio_vmx_offset);
1057 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1059 if (offset == -1)
1060 return;
1062 val.vval = VCPU_VSX_VR(vcpu, index);
1063 val.vsx16val[offset] = gpr16;
1064 VCPU_VSX_VR(vcpu, index) = val.vval;
1067 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1068 u8 gpr8)
1070 union kvmppc_one_reg val;
1071 int offset = kvmppc_get_vmx_byte_offset(vcpu,
1072 vcpu->arch.mmio_vmx_offset);
1073 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1075 if (offset == -1)
1076 return;
1078 val.vval = VCPU_VSX_VR(vcpu, index);
1079 val.vsx8val[offset] = gpr8;
1080 VCPU_VSX_VR(vcpu, index) = val.vval;
1082 #endif /* CONFIG_ALTIVEC */
1084 #ifdef CONFIG_PPC_FPU
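/*
 * sp_to_dp()/dp_to_sp() convert between single and double precision by
 * bouncing the value through FPR0 with lfs/stfd (lfd/stfs for the reverse
 * direction), which is why the FPU must be enabled and preemption
 * disabled around the asm.
 */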
1085 static inline u64 sp_to_dp(u32 fprs)
1087 u64 fprd;
1089 preempt_disable();
1090 enable_kernel_fp();
1091 asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
1092 : "fr0");
1093 preempt_enable();
1094 return fprd;
1097 static inline u32 dp_to_sp(u64 fprd)
1099 u32 fprs;
1101 preempt_disable();
1102 enable_kernel_fp();
1103 asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
1104 : "fr0");
1105 preempt_enable();
1106 return fprs;
1109 #else
1110 #define sp_to_dp(x) (x)
1111 #define dp_to_sp(x) (x)
1112 #endif /* CONFIG_PPC_FPU */
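/*
 * Complete an MMIO load: take the bytes userspace left in run->mmio.data,
 * apply any byte swapping, sign extension or single-to-double conversion
 * that was requested, and write the result into the GPR/FPR/VSX/VMX
 * register recorded in vcpu->arch.io_gpr.
 */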
1114 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
1115 struct kvm_run *run)
1117 u64 uninitialized_var(gpr);
1119 if (run->mmio.len > sizeof(gpr)) {
1120 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
1121 return;
1124 if (!vcpu->arch.mmio_host_swabbed) {
1125 switch (run->mmio.len) {
1126 case 8: gpr = *(u64 *)run->mmio.data; break;
1127 case 4: gpr = *(u32 *)run->mmio.data; break;
1128 case 2: gpr = *(u16 *)run->mmio.data; break;
1129 case 1: gpr = *(u8 *)run->mmio.data; break;
1131 } else {
1132 switch (run->mmio.len) {
1133 case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
1134 case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
1135 case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
1136 case 1: gpr = *(u8 *)run->mmio.data; break;
1140 /* conversion between single and double precision */
1141 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1142 gpr = sp_to_dp(gpr);
1144 if (vcpu->arch.mmio_sign_extend) {
1145 switch (run->mmio.len) {
1146 #ifdef CONFIG_PPC64
1147 case 4:
1148 gpr = (s64)(s32)gpr;
1149 break;
1150 #endif
1151 case 2:
1152 gpr = (s64)(s16)gpr;
1153 break;
1154 case 1:
1155 gpr = (s64)(s8)gpr;
1156 break;
1160 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1161 case KVM_MMIO_REG_GPR:
1162 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1163 break;
1164 case KVM_MMIO_REG_FPR:
1165 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1166 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1168 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1169 break;
1170 #ifdef CONFIG_PPC_BOOK3S
1171 case KVM_MMIO_REG_QPR:
1172 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1173 break;
1174 case KVM_MMIO_REG_FQPR:
1175 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1176 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1177 break;
1178 #endif
1179 #ifdef CONFIG_VSX
1180 case KVM_MMIO_REG_VSX:
1181 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1182 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1184 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1185 kvmppc_set_vsr_dword(vcpu, gpr);
1186 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
1187 kvmppc_set_vsr_word(vcpu, gpr);
1188 else if (vcpu->arch.mmio_copy_type ==
1189 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
1190 kvmppc_set_vsr_dword_dump(vcpu, gpr);
1191 else if (vcpu->arch.mmio_copy_type ==
1192 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
1193 kvmppc_set_vsr_word_dump(vcpu, gpr);
1194 break;
1195 #endif
1196 #ifdef CONFIG_ALTIVEC
1197 case KVM_MMIO_REG_VMX:
1198 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1199 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1201 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1202 kvmppc_set_vmx_dword(vcpu, gpr);
1203 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1204 kvmppc_set_vmx_word(vcpu, gpr);
1205 else if (vcpu->arch.mmio_copy_type ==
1206 KVMPPC_VMX_COPY_HWORD)
1207 kvmppc_set_vmx_hword(vcpu, gpr);
1208 else if (vcpu->arch.mmio_copy_type ==
1209 KVMPPC_VMX_COPY_BYTE)
1210 kvmppc_set_vmx_byte(vcpu, gpr);
1211 break;
1212 #endif
1213 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1214 case KVM_MMIO_REG_NESTED_GPR:
1215 if (kvmppc_need_byteswap(vcpu))
1216 gpr = swab64(gpr);
1217 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
1218 sizeof(gpr));
1219 break;
1220 #endif
1221 default:
1222 BUG();
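/*
 * Common MMIO load path: record which register the result goes to and
 * which conversions are needed, then try the in-kernel KVM_MMIO_BUS; only
 * if no in-kernel device claims the access do we return EMULATE_DO_MMIO
 * and exit to userspace.
 */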
1226 static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1227 unsigned int rt, unsigned int bytes,
1228 int is_default_endian, int sign_extend)
1230 int idx, ret;
1231 bool host_swabbed;
1233 /* Pity C doesn't have a logical XOR operator */
1234 if (kvmppc_need_byteswap(vcpu)) {
1235 host_swabbed = is_default_endian;
1236 } else {
1237 host_swabbed = !is_default_endian;
1240 if (bytes > sizeof(run->mmio.data)) {
1241 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1242 run->mmio.len);
1245 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1246 run->mmio.len = bytes;
1247 run->mmio.is_write = 0;
1249 vcpu->arch.io_gpr = rt;
1250 vcpu->arch.mmio_host_swabbed = host_swabbed;
1251 vcpu->mmio_needed = 1;
1252 vcpu->mmio_is_write = 0;
1253 vcpu->arch.mmio_sign_extend = sign_extend;
1255 idx = srcu_read_lock(&vcpu->kvm->srcu);
1257 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1258 bytes, &run->mmio.data);
1260 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1262 if (!ret) {
1263 kvmppc_complete_mmio_load(vcpu, run);
1264 vcpu->mmio_needed = 0;
1265 return EMULATE_DONE;
1268 return EMULATE_DO_MMIO;
1271 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1272 unsigned int rt, unsigned int bytes,
1273 int is_default_endian)
1275 return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
1277 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1279 /* Same as above, but sign extends */
1280 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
1281 unsigned int rt, unsigned int bytes,
1282 int is_default_endian)
1284 return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
1287 #ifdef CONFIG_VSX
1288 int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1289 unsigned int rt, unsigned int bytes,
1290 int is_default_endian, int mmio_sign_extend)
1292 enum emulation_result emulated = EMULATE_DONE;
1294 /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1295 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1296 return EMULATE_FAIL;
1298 while (vcpu->arch.mmio_vsx_copy_nums) {
1299 emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
1300 is_default_endian, mmio_sign_extend);
1302 if (emulated != EMULATE_DONE)
1303 break;
1305 vcpu->arch.paddr_accessed += run->mmio.len;
1307 vcpu->arch.mmio_vsx_copy_nums--;
1308 vcpu->arch.mmio_vsx_offset++;
1310 return emulated;
1312 #endif /* CONFIG_VSX */
1314 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1315 u64 val, unsigned int bytes, int is_default_endian)
1317 void *data = run->mmio.data;
1318 int idx, ret;
1319 bool host_swabbed;
1321 /* Pity C doesn't have a logical XOR operator */
1322 if (kvmppc_need_byteswap(vcpu)) {
1323 host_swabbed = is_default_endian;
1324 } else {
1325 host_swabbed = !is_default_endian;
1328 if (bytes > sizeof(run->mmio.data)) {
1329 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1330 run->mmio.len);
1333 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1334 run->mmio.len = bytes;
1335 run->mmio.is_write = 1;
1336 vcpu->mmio_needed = 1;
1337 vcpu->mmio_is_write = 1;
1339 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1340 val = dp_to_sp(val);
1342 /* Store the value at the lowest bytes in 'data'. */
1343 if (!host_swabbed) {
1344 switch (bytes) {
1345 case 8: *(u64 *)data = val; break;
1346 case 4: *(u32 *)data = val; break;
1347 case 2: *(u16 *)data = val; break;
1348 case 1: *(u8 *)data = val; break;
1350 } else {
1351 switch (bytes) {
1352 case 8: *(u64 *)data = swab64(val); break;
1353 case 4: *(u32 *)data = swab32(val); break;
1354 case 2: *(u16 *)data = swab16(val); break;
1355 case 1: *(u8 *)data = val; break;
1359 idx = srcu_read_lock(&vcpu->kvm->srcu);
1361 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1362 bytes, &run->mmio.data);
1364 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1366 if (!ret) {
1367 vcpu->mmio_needed = 0;
1368 return EMULATE_DONE;
1371 return EMULATE_DO_MMIO;
1373 EXPORT_SYMBOL_GPL(kvmppc_handle_store);
1375 #ifdef CONFIG_VSX
1376 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1378 u32 dword_offset, word_offset;
1379 union kvmppc_one_reg reg;
1380 int vsx_offset = 0;
1381 int copy_type = vcpu->arch.mmio_copy_type;
1382 int result = 0;
1384 switch (copy_type) {
1385 case KVMPPC_VSX_COPY_DWORD:
1386 vsx_offset =
1387 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1389 if (vsx_offset == -1) {
1390 result = -1;
1391 break;
1394 if (rs < 32) {
1395 *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1396 } else {
1397 reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1398 *val = reg.vsxval[vsx_offset];
1400 break;
1402 case KVMPPC_VSX_COPY_WORD:
1403 vsx_offset =
1404 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1406 if (vsx_offset == -1) {
1407 result = -1;
1408 break;
1411 if (rs < 32) {
1412 dword_offset = vsx_offset / 2;
1413 word_offset = vsx_offset % 2;
1414 reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1415 *val = reg.vsx32val[word_offset];
1416 } else {
1417 reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1418 *val = reg.vsx32val[vsx_offset];
1420 break;
1422 default:
1423 result = -1;
1424 break;
1427 return result;
1430 int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1431 int rs, unsigned int bytes, int is_default_endian)
1433 u64 val;
1434 enum emulation_result emulated = EMULATE_DONE;
1436 vcpu->arch.io_gpr = rs;
1438 /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1439 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1440 return EMULATE_FAIL;
1442 while (vcpu->arch.mmio_vsx_copy_nums) {
1443 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1444 return EMULATE_FAIL;
1446 emulated = kvmppc_handle_store(run, vcpu,
1447 val, bytes, is_default_endian);
1449 if (emulated != EMULATE_DONE)
1450 break;
1452 vcpu->arch.paddr_accessed += run->mmio.len;
1454 vcpu->arch.mmio_vsx_copy_nums--;
1455 vcpu->arch.mmio_vsx_offset++;
1458 return emulated;
1461 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
1462 struct kvm_run *run)
1464 enum emulation_result emulated = EMULATE_FAIL;
1465 int r;
1467 vcpu->arch.paddr_accessed += run->mmio.len;
1469 if (!vcpu->mmio_is_write) {
1470 emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
1471 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1472 } else {
1473 emulated = kvmppc_handle_vsx_store(run, vcpu,
1474 vcpu->arch.io_gpr, run->mmio.len, 1);
1477 switch (emulated) {
1478 case EMULATE_DO_MMIO:
1479 run->exit_reason = KVM_EXIT_MMIO;
1480 r = RESUME_HOST;
1481 break;
1482 case EMULATE_FAIL:
1483 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1484 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1485 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1486 r = RESUME_HOST;
1487 break;
1488 default:
1489 r = RESUME_GUEST;
1490 break;
1492 return r;
1494 #endif /* CONFIG_VSX */
1496 #ifdef CONFIG_ALTIVEC
1497 int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1498 unsigned int rt, unsigned int bytes, int is_default_endian)
1500 enum emulation_result emulated = EMULATE_DONE;
1502 if (vcpu->arch.mmio_vsx_copy_nums > 2)
1503 return EMULATE_FAIL;
1505 while (vcpu->arch.mmio_vmx_copy_nums) {
1506 emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
1507 is_default_endian, 0);
1509 if (emulated != EMULATE_DONE)
1510 break;
1512 vcpu->arch.paddr_accessed += run->mmio.len;
1513 vcpu->arch.mmio_vmx_copy_nums--;
1514 vcpu->arch.mmio_vmx_offset++;
1517 return emulated;
1520 int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1522 union kvmppc_one_reg reg;
1523 int vmx_offset = 0;
1524 int result = 0;
1526 vmx_offset =
1527 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1529 if (vmx_offset == -1)
1530 return -1;
1532 reg.vval = VCPU_VSX_VR(vcpu, index);
1533 *val = reg.vsxval[vmx_offset];
1535 return result;
1538 int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1540 union kvmppc_one_reg reg;
1541 int vmx_offset = 0;
1542 int result = 0;
1544 vmx_offset =
1545 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1547 if (vmx_offset == -1)
1548 return -1;
1550 reg.vval = VCPU_VSX_VR(vcpu, index);
1551 *val = reg.vsx32val[vmx_offset];
1553 return result;
1556 int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1558 union kvmppc_one_reg reg;
1559 int vmx_offset = 0;
1560 int result = 0;
1562 vmx_offset =
1563 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1565 if (vmx_offset == -1)
1566 return -1;
1568 reg.vval = VCPU_VSX_VR(vcpu, index);
1569 *val = reg.vsx16val[vmx_offset];
1571 return result;
1574 int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1576 union kvmppc_one_reg reg;
1577 int vmx_offset = 0;
1578 int result = 0;
1580 vmx_offset =
1581 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1583 if (vmx_offset == -1)
1584 return -1;
1586 reg.vval = VCPU_VSX_VR(vcpu, index);
1587 *val = reg.vsx8val[vmx_offset];
1589 return result;
1592 int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1593 unsigned int rs, unsigned int bytes, int is_default_endian)
1595 u64 val = 0;
1596 unsigned int index = rs & KVM_MMIO_REG_MASK;
1597 enum emulation_result emulated = EMULATE_DONE;
1599 if (vcpu->arch.mmio_vsx_copy_nums > 2)
1600 return EMULATE_FAIL;
1602 vcpu->arch.io_gpr = rs;
1604 while (vcpu->arch.mmio_vmx_copy_nums) {
1605 switch (vcpu->arch.mmio_copy_type) {
1606 case KVMPPC_VMX_COPY_DWORD:
1607 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1608 return EMULATE_FAIL;
1610 break;
1611 case KVMPPC_VMX_COPY_WORD:
1612 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1613 return EMULATE_FAIL;
1614 break;
1615 case KVMPPC_VMX_COPY_HWORD:
1616 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1617 return EMULATE_FAIL;
1618 break;
1619 case KVMPPC_VMX_COPY_BYTE:
1620 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1621 return EMULATE_FAIL;
1622 break;
1623 default:
1624 return EMULATE_FAIL;
1627 emulated = kvmppc_handle_store(run, vcpu, val, bytes,
1628 is_default_endian);
1629 if (emulated != EMULATE_DONE)
1630 break;
1632 vcpu->arch.paddr_accessed += run->mmio.len;
1633 vcpu->arch.mmio_vmx_copy_nums--;
1634 vcpu->arch.mmio_vmx_offset++;
1637 return emulated;
1640 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
1641 struct kvm_run *run)
1643 enum emulation_result emulated = EMULATE_FAIL;
1644 int r;
1646 vcpu->arch.paddr_accessed += run->mmio.len;
1648 if (!vcpu->mmio_is_write) {
1649 emulated = kvmppc_handle_vmx_load(run, vcpu,
1650 vcpu->arch.io_gpr, run->mmio.len, 1);
1651 } else {
1652 emulated = kvmppc_handle_vmx_store(run, vcpu,
1653 vcpu->arch.io_gpr, run->mmio.len, 1);
1656 switch (emulated) {
1657 case EMULATE_DO_MMIO:
1658 run->exit_reason = KVM_EXIT_MMIO;
1659 r = RESUME_HOST;
1660 break;
1661 case EMULATE_FAIL:
1662 pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1663 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1664 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1665 r = RESUME_HOST;
1666 break;
1667 default:
1668 r = RESUME_GUEST;
1669 break;
1671 return r;
1673 #endif /* CONFIG_ALTIVEC */
1675 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1677 int r = 0;
1678 union kvmppc_one_reg val;
1679 int size;
1681 size = one_reg_size(reg->id);
1682 if (size > sizeof(val))
1683 return -EINVAL;
1685 r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1686 if (r == -EINVAL) {
1687 r = 0;
1688 switch (reg->id) {
1689 #ifdef CONFIG_ALTIVEC
1690 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1691 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1692 r = -ENXIO;
1693 break;
1695 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1696 break;
1697 case KVM_REG_PPC_VSCR:
1698 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1699 r = -ENXIO;
1700 break;
1702 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1703 break;
1704 case KVM_REG_PPC_VRSAVE:
1705 val = get_reg_val(reg->id, vcpu->arch.vrsave);
1706 break;
1707 #endif /* CONFIG_ALTIVEC */
1708 default:
1709 r = -EINVAL;
1710 break;
1714 if (r)
1715 return r;
1717 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1718 r = -EFAULT;
1720 return r;
1723 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1725 int r;
1726 union kvmppc_one_reg val;
1727 int size;
1729 size = one_reg_size(reg->id);
1730 if (size > sizeof(val))
1731 return -EINVAL;
1733 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1734 return -EFAULT;
1736 r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1737 if (r == -EINVAL) {
1738 r = 0;
1739 switch (reg->id) {
1740 #ifdef CONFIG_ALTIVEC
1741 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1742 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1743 r = -ENXIO;
1744 break;
1746 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1747 break;
1748 case KVM_REG_PPC_VSCR:
1749 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1750 r = -ENXIO;
1751 break;
1753 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1754 break;
1755 case KVM_REG_PPC_VRSAVE:
1756 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1757 r = -ENXIO;
1758 break;
1760 vcpu->arch.vrsave = set_reg_val(reg->id, val);
1761 break;
1762 #endif /* CONFIG_ALTIVEC */
1763 default:
1764 r = -EINVAL;
1765 break;
1769 return r;
1772 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
1774 int r;
1776 vcpu_load(vcpu);
1778 if (vcpu->mmio_needed) {
1779 vcpu->mmio_needed = 0;
1780 if (!vcpu->mmio_is_write)
1781 kvmppc_complete_mmio_load(vcpu, run);
1782 #ifdef CONFIG_VSX
1783 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1784 vcpu->arch.mmio_vsx_copy_nums--;
1785 vcpu->arch.mmio_vsx_offset++;
1788 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1789 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
1790 if (r == RESUME_HOST) {
1791 vcpu->mmio_needed = 1;
1792 goto out;
1795 #endif
1796 #ifdef CONFIG_ALTIVEC
1797 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1798 vcpu->arch.mmio_vmx_copy_nums--;
1799 vcpu->arch.mmio_vmx_offset++;
1802 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1803 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
1804 if (r == RESUME_HOST) {
1805 vcpu->mmio_needed = 1;
1806 goto out;
1809 #endif
1810 } else if (vcpu->arch.osi_needed) {
1811 u64 *gprs = run->osi.gprs;
1812 int i;
1814 for (i = 0; i < 32; i++)
1815 kvmppc_set_gpr(vcpu, i, gprs[i]);
1816 vcpu->arch.osi_needed = 0;
1817 } else if (vcpu->arch.hcall_needed) {
1818 int i;
1820 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1821 for (i = 0; i < 9; ++i)
1822 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1823 vcpu->arch.hcall_needed = 0;
1824 #ifdef CONFIG_BOOKE
1825 } else if (vcpu->arch.epr_needed) {
1826 kvmppc_set_epr(vcpu, run->epr.epr);
1827 vcpu->arch.epr_needed = 0;
1828 #endif
1831 kvm_sigset_activate(vcpu);
1833 if (run->immediate_exit)
1834 r = -EINTR;
1835 else
1836 r = kvmppc_vcpu_run(run, vcpu);
1838 kvm_sigset_deactivate(vcpu);
1840 #ifdef CONFIG_ALTIVEC
1841 out:
1842 #endif
1843 vcpu_put(vcpu);
1844 return r;
1847 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1849 if (irq->irq == KVM_INTERRUPT_UNSET) {
1850 kvmppc_core_dequeue_external(vcpu);
1851 return 0;
1854 kvmppc_core_queue_external(vcpu, irq);
1856 kvm_vcpu_kick(vcpu);
1858 return 0;
1861 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1862 struct kvm_enable_cap *cap)
1864 int r;
1866 if (cap->flags)
1867 return -EINVAL;
1869 switch (cap->cap) {
1870 case KVM_CAP_PPC_OSI:
1871 r = 0;
1872 vcpu->arch.osi_enabled = true;
1873 break;
1874 case KVM_CAP_PPC_PAPR:
1875 r = 0;
1876 vcpu->arch.papr_enabled = true;
1877 break;
1878 case KVM_CAP_PPC_EPR:
1879 r = 0;
1880 if (cap->args[0])
1881 vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1882 else
1883 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1884 break;
1885 #ifdef CONFIG_BOOKE
1886 case KVM_CAP_PPC_BOOKE_WATCHDOG:
1887 r = 0;
1888 vcpu->arch.watchdog_enabled = true;
1889 break;
1890 #endif
1891 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1892 case KVM_CAP_SW_TLB: {
1893 struct kvm_config_tlb cfg;
1894 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1896 r = -EFAULT;
1897 if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1898 break;
1900 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1901 break;
1903 #endif
1904 #ifdef CONFIG_KVM_MPIC
1905 case KVM_CAP_IRQ_MPIC: {
1906 struct fd f;
1907 struct kvm_device *dev;
1909 r = -EBADF;
1910 f = fdget(cap->args[0]);
1911 if (!f.file)
1912 break;
1914 r = -EPERM;
1915 dev = kvm_device_from_filp(f.file);
1916 if (dev)
1917 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1919 fdput(f);
1920 break;
1922 #endif
1923 #ifdef CONFIG_KVM_XICS
1924 case KVM_CAP_IRQ_XICS: {
1925 struct fd f;
1926 struct kvm_device *dev;
1928 r = -EBADF;
1929 f = fdget(cap->args[0]);
1930 if (!f.file)
1931 break;
1933 r = -EPERM;
1934 dev = kvm_device_from_filp(f.file);
1935 if (dev) {
1936 if (xics_on_xive())
1937 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1938 else
1939 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1942 fdput(f);
1943 break;
1945 #endif /* CONFIG_KVM_XICS */
1946 #ifdef CONFIG_KVM_XIVE
1947 case KVM_CAP_PPC_IRQ_XIVE: {
1948 struct fd f;
1949 struct kvm_device *dev;
1951 r = -EBADF;
1952 f = fdget(cap->args[0]);
1953 if (!f.file)
1954 break;
1956 r = -ENXIO;
1957 if (!xive_enabled())
1958 break;
1960 r = -EPERM;
1961 dev = kvm_device_from_filp(f.file);
1962 if (dev)
1963 r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
1964 cap->args[1]);
1966 fdput(f);
1967 break;
1969 #endif /* CONFIG_KVM_XIVE */
1970 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1971 case KVM_CAP_PPC_FWNMI:
1972 r = -EINVAL;
1973 if (!is_kvmppc_hv_enabled(vcpu->kvm))
1974 break;
1975 r = 0;
1976 vcpu->kvm->arch.fwnmi_enabled = true;
1977 break;
1978 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1979 default:
1980 r = -EINVAL;
1981 break;
1984 if (!r)
1985 r = kvmppc_sanity_check(vcpu);
1987 return r;
1990 bool kvm_arch_intc_initialized(struct kvm *kvm)
1992 #ifdef CONFIG_KVM_MPIC
1993 if (kvm->arch.mpic)
1994 return true;
1995 #endif
1996 #ifdef CONFIG_KVM_XICS
1997 if (kvm->arch.xics || kvm->arch.xive)
1998 return true;
1999 #endif
2000 return false;
2003 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2004 struct kvm_mp_state *mp_state)
2006 return -EINVAL;
2009 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2010 struct kvm_mp_state *mp_state)
2012 return -EINVAL;
2015 long kvm_arch_vcpu_async_ioctl(struct file *filp,
2016 unsigned int ioctl, unsigned long arg)
2018 struct kvm_vcpu *vcpu = filp->private_data;
2019 void __user *argp = (void __user *)arg;
2021 if (ioctl == KVM_INTERRUPT) {
2022 struct kvm_interrupt irq;
2023 if (copy_from_user(&irq, argp, sizeof(irq)))
2024 return -EFAULT;
2025 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2027 return -ENOIOCTLCMD;
2030 long kvm_arch_vcpu_ioctl(struct file *filp,
2031 unsigned int ioctl, unsigned long arg)
2033 struct kvm_vcpu *vcpu = filp->private_data;
2034 void __user *argp = (void __user *)arg;
2035 long r;
2037 switch (ioctl) {
2038 case KVM_ENABLE_CAP:
2040 struct kvm_enable_cap cap;
2041 r = -EFAULT;
2042 vcpu_load(vcpu);
2043 if (copy_from_user(&cap, argp, sizeof(cap)))
2044 goto out;
2045 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2046 vcpu_put(vcpu);
2047 break;
2050 case KVM_SET_ONE_REG:
2051 case KVM_GET_ONE_REG:
2053 struct kvm_one_reg reg;
2054 r = -EFAULT;
2055 if (copy_from_user(&reg, argp, sizeof(reg)))
2056 goto out;
2057 if (ioctl == KVM_SET_ONE_REG)
2058 r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
2059 else
2060 r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2061 break;
2064 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2065 case KVM_DIRTY_TLB: {
2066 struct kvm_dirty_tlb dirty;
2067 r = -EFAULT;
2068 vcpu_load(vcpu);
2069 if (copy_from_user(&dirty, argp, sizeof(dirty)))
2070 goto out;
2071 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2072 vcpu_put(vcpu);
2073 break;
2075 #endif
2076 default:
2077 r = -EINVAL;
2080 out:
2081 return r;
2084 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2086 return VM_FAULT_SIGBUS;
2089 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2091 u32 inst_nop = 0x60000000;
2092 #ifdef CONFIG_KVM_BOOKE_HV
2093 u32 inst_sc1 = 0x44000022;
2094 pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2095 pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2096 pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2097 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2098 #else
2099 u32 inst_lis = 0x3c000000;
2100 u32 inst_ori = 0x60000000;
2101 u32 inst_sc = 0x44000002;
2102 u32 inst_imm_mask = 0xffff;
2105 /* The hypercall to get into KVM from within guest context is as
2106  * follows:
2108  *    lis r0, KVM_SC_MAGIC_R0@h
2109  *    ori r0, r0, KVM_SC_MAGIC_R0@l
2110  *    sc
2111  *    nop */
2113 pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2114 pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2115 pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2116 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2117 #endif
2119 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
2121 return 0;
2124 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2125 bool line_status)
2127 if (!irqchip_in_kernel(kvm))
2128 return -ENXIO;
2130 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2131 irq_event->irq, irq_event->level,
2132 line_status);
2133 return 0;
2137 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2138 struct kvm_enable_cap *cap)
2140 int r;
2142 if (cap->flags)
2143 return -EINVAL;
2145 switch (cap->cap) {
2146 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2147 case KVM_CAP_PPC_ENABLE_HCALL: {
2148 unsigned long hcall = cap->args[0];
2150 r = -EINVAL;
2151 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2152 cap->args[1] > 1)
2153 break;
2154 if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2155 break;
2156 if (cap->args[1])
2157 set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2158 else
2159 clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2160 r = 0;
2161 break;
2163 case KVM_CAP_PPC_SMT: {
2164 unsigned long mode = cap->args[0];
2165 unsigned long flags = cap->args[1];
2167 r = -EINVAL;
2168 if (kvm->arch.kvm_ops->set_smt_mode)
2169 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2170 break;
2173 case KVM_CAP_PPC_NESTED_HV:
2174 r = -EINVAL;
2175 if (!is_kvmppc_hv_enabled(kvm) ||
2176 !kvm->arch.kvm_ops->enable_nested)
2177 break;
2178 r = kvm->arch.kvm_ops->enable_nested(kvm);
2179 break;
2180 #endif
2181 default:
2182 r = -EINVAL;
2183 break;
2186 return r;
2189 #ifdef CONFIG_PPC_BOOK3S_64
2191 /* These functions check whether the underlying hardware is safe
2192  * against attacks based on observing the effects of speculatively
2193  * executed instructions, and whether it supplies instructions for
2194  * use in workarounds. The information comes from firmware, either
2195  * via the device tree on powernv platforms or from an hcall on
2196  * pseries platforms. */
2198 #ifdef CONFIG_PPC_PSERIES
2199 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2201 struct h_cpu_char_result c;
2202 unsigned long rc;
2204 if (!machine_is(pseries))
2205 return -ENOTTY;
2207 rc = plpar_get_cpu_characteristics(&c);
2208 if (rc == H_SUCCESS) {
2209 cp->character = c.character;
2210 cp->behaviour = c.behaviour;
2211 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2212 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2213 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2214 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2215 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2216 KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2217 KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2218 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2219 KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2220 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2221 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2222 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2223 KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2225 return 0;
2227 #else
2228 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2230 return -ENOTTY;
2232 #endif
2234 static inline bool have_fw_feat(struct device_node *fw_features,
2235 const char *state, const char *name)
2237 struct device_node *np;
2238 bool r = false;
2240 np = of_get_child_by_name(fw_features, name);
2241 if (np) {
2242 r = of_property_read_bool(np, state);
2243 of_node_put(np);
2245 return r;
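/*
 * Gather the CPU speculation-control characteristics reported by
 * KVM_PPC_GET_CPU_CHAR: try the pseries hcall first, then fall back to
 * the ibm,opal/fw-features device-tree properties on powernv.
 */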
2248 static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2250 struct device_node *np, *fw_features;
2251 int r;
2253 memset(cp, 0, sizeof(*cp));
2254 r = pseries_get_cpu_char(cp);
2255 if (r != -ENOTTY)
2256 return r;
2258 np = of_find_node_by_name(NULL, "ibm,opal");
2259 if (np) {
2260 fw_features = of_get_child_by_name(np, "fw-features");
2261 of_node_put(np);
2262 if (!fw_features)
2263 return 0;
2264 if (have_fw_feat(fw_features, "enabled",
2265 "inst-spec-barrier-ori31,31,0"))
2266 cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2267 if (have_fw_feat(fw_features, "enabled",
2268 "fw-bcctrl-serialized"))
2269 cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2270 if (have_fw_feat(fw_features, "enabled",
2271 "inst-l1d-flush-ori30,30,0"))
2272 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2273 if (have_fw_feat(fw_features, "enabled",
2274 "inst-l1d-flush-trig2"))
2275 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2276 if (have_fw_feat(fw_features, "enabled",
2277 "fw-l1d-thread-split"))
2278 cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2279 if (have_fw_feat(fw_features, "enabled",
2280 "fw-count-cache-disabled"))
2281 cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2282 if (have_fw_feat(fw_features, "enabled",
2283 "fw-count-cache-flush-bcctr2,0,0"))
2284 cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2285 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2286 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2287 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2288 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2289 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2290 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2291 KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2293 if (have_fw_feat(fw_features, "enabled",
2294 "speculation-policy-favor-security"))
2295 cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2296 if (!have_fw_feat(fw_features, "disabled",
2297 "needs-l1d-flush-msr-pr-0-to-1"))
2298 cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2299 if (!have_fw_feat(fw_features, "disabled",
2300 "needs-spec-barrier-for-bound-checks"))
2301 cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2302 if (have_fw_feat(fw_features, "enabled",
2303 "needs-count-cache-flush-on-context-switch"))
2304 cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2305 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2306 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2307 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2308 KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2310 of_node_put(fw_features);
2313 return 0;
2315 #endif
2317 long kvm_arch_vm_ioctl(struct file *filp,
2318 unsigned int ioctl, unsigned long arg)
2320 struct kvm *kvm __maybe_unused = filp->private_data;
2321 void __user *argp = (void __user *)arg;
2322 long r;
2324 switch (ioctl) {
2325 case KVM_PPC_GET_PVINFO: {
2326 struct kvm_ppc_pvinfo pvinfo;
2327 memset(&pvinfo, 0, sizeof(pvinfo));
2328 r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2329 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2330 r = -EFAULT;
2331 goto out;
2334 break;
2336 #ifdef CONFIG_SPAPR_TCE_IOMMU
2337 case KVM_CREATE_SPAPR_TCE_64: {
2338 struct kvm_create_spapr_tce_64 create_tce_64;
2340 r = -EFAULT;
2341 if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2342 goto out;
2343 if (create_tce_64.flags) {
2344 r = -EINVAL;
2345 goto out;
2347 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2348 goto out;
2350 case KVM_CREATE_SPAPR_TCE: {
2351 struct kvm_create_spapr_tce create_tce;
2352 struct kvm_create_spapr_tce_64 create_tce_64;
2354 r = -EFAULT;
2355 if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2356 goto out;
2358 create_tce_64.liobn = create_tce.liobn;
2359 create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2360 create_tce_64.offset = 0;
2361 create_tce_64.size = create_tce.window_size >>
2362 IOMMU_PAGE_SHIFT_4K;
2363 create_tce_64.flags = 0;
2364 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2365 goto out;
2367 #endif
2368 #ifdef CONFIG_PPC_BOOK3S_64
2369 case KVM_PPC_GET_SMMU_INFO: {
2370 struct kvm_ppc_smmu_info info;
2371 struct kvm *kvm = filp->private_data;
2373 memset(&info, 0, sizeof(info));
2374 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2375 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2376 r = -EFAULT;
2377 break;
2379 case KVM_PPC_RTAS_DEFINE_TOKEN: {
2380 struct kvm *kvm = filp->private_data;
2382 r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2383 break;
2385 case KVM_PPC_CONFIGURE_V3_MMU: {
2386 struct kvm *kvm = filp->private_data;
2387 struct kvm_ppc_mmuv3_cfg cfg;
2389 r = -EINVAL;
2390 if (!kvm->arch.kvm_ops->configure_mmu)
2391 goto out;
2392 r = -EFAULT;
2393 if (copy_from_user(&cfg, argp, sizeof(cfg)))
2394 goto out;
2395 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2396 break;
2398 case KVM_PPC_GET_RMMU_INFO: {
2399 struct kvm *kvm = filp->private_data;
2400 struct kvm_ppc_rmmu_info info;
2402 r = -EINVAL;
2403 if (!kvm->arch.kvm_ops->get_rmmu_info)
2404 goto out;
2405 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2406 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2407 r = -EFAULT;
2408 break;
2410 case KVM_PPC_GET_CPU_CHAR: {
2411 struct kvm_ppc_cpu_char cpuchar;
2413 r = kvmppc_get_cpu_char(&cpuchar);
2414 if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2415 r = -EFAULT;
2416 break;
2418 case KVM_PPC_SVM_OFF: {
2419 struct kvm *kvm = filp->private_data;
2421 r = 0;
2422 if (!kvm->arch.kvm_ops->svm_off)
2423 goto out;
2425 r = kvm->arch.kvm_ops->svm_off(kvm);
2426 break;
2428 default: {
2429 struct kvm *kvm = filp->private_data;
2430 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2432 #else /* CONFIG_PPC_BOOK3S_64 */
2433 default:
2434 r = -ENOTTY;
2435 #endif
2437 out:
2438 return r;
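/*
 * Simple bitmap allocator for logical partition IDs (LPIDs): allocation
 * scans for a clear bit and claims it atomically with test_and_set_bit(),
 * retrying if another caller raced in first.
 */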
2441 static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
2442 static unsigned long nr_lpids;
2444 long kvmppc_alloc_lpid(void)
2446 long lpid;
2448 do {
2449 lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
2450 if (lpid >= nr_lpids) {
2451 pr_err("%s: No LPIDs free\n", __func__);
2452 return -ENOMEM;
2454 } while (test_and_set_bit(lpid, lpid_inuse));
2456 return lpid;
2458 EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2460 void kvmppc_claim_lpid(long lpid)
2462 set_bit(lpid, lpid_inuse);
2464 EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
2466 void kvmppc_free_lpid(long lpid)
2468 clear_bit(lpid, lpid_inuse);
2470 EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2472 void kvmppc_init_lpid(unsigned long nr_lpids_param)
2474 nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
2475 memset(lpid_inuse, 0, sizeof(lpid_inuse));
2477 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2479 int kvm_arch_init(void *opaque)
2481 return 0;
2484 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);