Revert "tty: hvc: Fix data abort due to race in hvc_open"
[linux/fpc-iii.git] / virt/kvm/arm/arm.c
blob 3d7e8fdeebcd7f9a38913657d8e3198012e0e2be
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
5 */
7 #include <linux/bug.h>
8 #include <linux/cpu_pm.h>
9 #include <linux/errno.h>
10 #include <linux/err.h>
11 #include <linux/kvm_host.h>
12 #include <linux/list.h>
13 #include <linux/module.h>
14 #include <linux/vmalloc.h>
15 #include <linux/fs.h>
16 #include <linux/mman.h>
17 #include <linux/sched.h>
18 #include <linux/kvm.h>
19 #include <linux/kvm_irqfd.h>
20 #include <linux/irqbypass.h>
21 #include <linux/sched/stat.h>
22 #include <trace/events/kvm.h>
24 #define CREATE_TRACE_POINTS
25 #include "trace.h"
27 #include <linux/uaccess.h>
28 #include <asm/ptrace.h>
29 #include <asm/mman.h>
30 #include <asm/tlbflush.h>
31 #include <asm/cacheflush.h>
32 #include <asm/cpufeature.h>
33 #include <asm/virt.h>
34 #include <asm/kvm_arm.h>
35 #include <asm/kvm_asm.h>
36 #include <asm/kvm_mmu.h>
37 #include <asm/kvm_emulate.h>
38 #include <asm/kvm_coproc.h>
39 #include <asm/sections.h>
41 #include <kvm/arm_hypercalls.h>
42 #include <kvm/arm_pmu.h>
43 #include <kvm/arm_psci.h>
45 #ifdef REQUIRES_VIRT
46 __asm__(".arch_extension virt");
47 #endif
49 DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
50 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
52 /* The VMID used in the VTTBR */
53 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
54 static u32 kvm_next_vmid;
55 static DEFINE_SPINLOCK(kvm_vmid_lock);
57 static bool vgic_present;
59 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
60 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
62 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
64 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
67 int kvm_arch_hardware_setup(void *opaque)
69 return 0;
72 int kvm_arch_check_processor_compat(void *opaque)
74 return 0;
77 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
78 struct kvm_enable_cap *cap)
80 int r;
82 if (cap->flags)
83 return -EINVAL;
85 switch (cap->cap) {
86 case KVM_CAP_ARM_NISV_TO_USER:
87 r = 0;
88 kvm->arch.return_nisv_io_abort_to_user = true;
89 break;
90 default:
91 r = -EINVAL;
92 break;
95 return r;
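Editor's note: kvm_vm_ioctl_enable_cap() above backs the KVM_ENABLE_CAP ioctl on a VM file descriptor, and the only capability it accepts here is KVM_CAP_ARM_NISV_TO_USER (with flags required to be zero). A minimal userspace sketch follows; the vm_fd parameter is an assumption (a descriptor obtained earlier via KVM_CREATE_VM), not part of this file.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>

/* Ask the kernel to forward non-ISV data aborts to userspace.
 * vm_fd is assumed to come from ioctl(kvm_fd, KVM_CREATE_VM, 0). */
static int enable_nisv_to_user(int vm_fd)
{
        struct kvm_enable_cap cap;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_ARM_NISV_TO_USER;     /* flags and args stay zero */

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}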
98 /**
99 * kvm_arch_init_vm - initializes a VM data structure
100 * @kvm: pointer to the KVM struct
101 */
102 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
104 int ret, cpu;
106 ret = kvm_arm_setup_stage2(kvm, type);
107 if (ret)
108 return ret;
110 kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
111 if (!kvm->arch.last_vcpu_ran)
112 return -ENOMEM;
114 for_each_possible_cpu(cpu)
115 *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
117 ret = kvm_alloc_stage2_pgd(kvm);
118 if (ret)
119 goto out_fail_alloc;
121 ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
122 if (ret)
123 goto out_free_stage2_pgd;
125 kvm_vgic_early_init(kvm);
127 /* Mark the initial VMID generation invalid */
128 kvm->arch.vmid.vmid_gen = 0;
130 /* The maximum number of VCPUs is limited by the host's GIC model */
131 kvm->arch.max_vcpus = vgic_present ?
132 kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
134 return ret;
135 out_free_stage2_pgd:
136 kvm_free_stage2_pgd(kvm);
137 out_fail_alloc:
138 free_percpu(kvm->arch.last_vcpu_ran);
139 kvm->arch.last_vcpu_ran = NULL;
140 return ret;
143 int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
145 return 0;
148 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
150 return VM_FAULT_SIGBUS;
154 /**
155 * kvm_arch_destroy_vm - destroy the VM data structure
156 * @kvm: pointer to the KVM struct
157 */
158 void kvm_arch_destroy_vm(struct kvm *kvm)
160 int i;
162 kvm_vgic_destroy(kvm);
164 free_percpu(kvm->arch.last_vcpu_ran);
165 kvm->arch.last_vcpu_ran = NULL;
167 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
168 if (kvm->vcpus[i]) {
169 kvm_vcpu_destroy(kvm->vcpus[i]);
170 kvm->vcpus[i] = NULL;
173 atomic_set(&kvm->online_vcpus, 0);
176 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
178 int r;
179 switch (ext) {
180 case KVM_CAP_IRQCHIP:
181 r = vgic_present;
182 break;
183 case KVM_CAP_IOEVENTFD:
184 case KVM_CAP_DEVICE_CTRL:
185 case KVM_CAP_USER_MEMORY:
186 case KVM_CAP_SYNC_MMU:
187 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
188 case KVM_CAP_ONE_REG:
189 case KVM_CAP_ARM_PSCI:
190 case KVM_CAP_ARM_PSCI_0_2:
191 case KVM_CAP_READONLY_MEM:
192 case KVM_CAP_MP_STATE:
193 case KVM_CAP_IMMEDIATE_EXIT:
194 case KVM_CAP_VCPU_EVENTS:
195 case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
196 case KVM_CAP_ARM_NISV_TO_USER:
197 case KVM_CAP_ARM_INJECT_EXT_DABT:
198 r = 1;
199 break;
200 case KVM_CAP_ARM_SET_DEVICE_ADDR:
201 r = 1;
202 break;
203 case KVM_CAP_NR_VCPUS:
204 r = num_online_cpus();
205 break;
206 case KVM_CAP_MAX_VCPUS:
207 r = KVM_MAX_VCPUS;
208 break;
209 case KVM_CAP_MAX_VCPU_ID:
210 r = KVM_MAX_VCPU_ID;
211 break;
212 case KVM_CAP_MSI_DEVID:
213 if (!kvm)
214 r = -EINVAL;
215 else
216 r = kvm->arch.vgic.msis_require_devid;
217 break;
218 case KVM_CAP_ARM_USER_IRQ:
219 /*
220 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
221 * (bump this number if adding more devices)
222 */
223 r = 1;
224 break;
225 default:
226 r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
227 break;
229 return r;
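Editor's note: the switch above is what answers KVM_CHECK_EXTENSION queries for arm. A hedged userspace probe is sketched below; the fd parameter is an assumption (either the /dev/kvm fd or a VM fd — VM-scoped capabilities such as KVM_CAP_MSI_DEVID need the VM fd).

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Returns > 0 if the capability is reported as available, 0 or < 0 otherwise. */
static int has_cap(int fd, long cap)
{
        return ioctl(fd, KVM_CHECK_EXTENSION, cap);
}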
232 long kvm_arch_dev_ioctl(struct file *filp,
233 unsigned int ioctl, unsigned long arg)
235 return -EINVAL;
238 struct kvm *kvm_arch_alloc_vm(void)
240 if (!has_vhe())
241 return kzalloc(sizeof(struct kvm), GFP_KERNEL);
243 return vzalloc(sizeof(struct kvm));
246 void kvm_arch_free_vm(struct kvm *kvm)
248 if (!has_vhe())
249 kfree(kvm);
250 else
251 vfree(kvm);
254 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
256 if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
257 return -EBUSY;
259 if (id >= kvm->arch.max_vcpus)
260 return -EINVAL;
262 return 0;
265 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
267 int err;
269 /* Force users to call KVM_ARM_VCPU_INIT */
270 vcpu->arch.target = -1;
271 bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
273 /* Set up the timer */
274 kvm_timer_vcpu_init(vcpu);
276 kvm_pmu_vcpu_init(vcpu);
278 kvm_arm_reset_debug_ptr(vcpu);
280 kvm_arm_pvtime_vcpu_init(&vcpu->arch);
282 err = kvm_vgic_vcpu_init(vcpu);
283 if (err)
284 return err;
286 return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
289 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
293 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
295 if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
296 static_branch_dec(&userspace_irqchip_in_use);
298 kvm_mmu_free_memory_caches(vcpu);
299 kvm_timer_vcpu_terminate(vcpu);
300 kvm_pmu_vcpu_destroy(vcpu);
302 kvm_arm_vcpu_destroy(vcpu);
305 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
307 return kvm_timer_is_pending(vcpu);
310 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
313 * If we're about to block (most likely because we've just hit a
314 * WFI), we need to sync back the state of the GIC CPU interface
315 * so that we have the latest PMR and group enables. This ensures
316 * that kvm_arch_vcpu_runnable has up-to-date data to decide
317 * whether we have pending interrupts.
319 * For the same reason, we want to tell GICv4 that we need
320 * doorbells to be signalled, should an interrupt become pending.
322 preempt_disable();
323 kvm_vgic_vmcr_sync(vcpu);
324 vgic_v4_put(vcpu, true);
325 preempt_enable();
328 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
330 preempt_disable();
331 vgic_v4_load(vcpu);
332 preempt_enable();
335 #define __ptrauth_save_key(regs, key) \
336 ({ \
337 regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
338 regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
341 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
343 int *last_ran;
344 kvm_host_data_t *cpu_data;
346 last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
347 cpu_data = this_cpu_ptr(&kvm_host_data);
350 * We might get preempted before the vCPU actually runs, but
351 * over-invalidation doesn't affect correctness.
353 if (*last_ran != vcpu->vcpu_id) {
354 kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
355 *last_ran = vcpu->vcpu_id;
358 vcpu->cpu = cpu;
359 vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
361 kvm_vgic_load(vcpu);
362 kvm_timer_vcpu_load(vcpu);
363 kvm_vcpu_load_sysregs(vcpu);
364 kvm_arch_vcpu_load_fp(vcpu);
365 kvm_vcpu_pmu_restore_guest(vcpu);
366 if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
367 kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
369 if (single_task_running())
370 vcpu_clear_wfx_traps(vcpu);
371 else
372 vcpu_set_wfx_traps(vcpu);
374 if (vcpu_has_ptrauth(vcpu)) {
375 struct kvm_cpu_context *ctxt = vcpu->arch.host_cpu_context;
377 __ptrauth_save_key(ctxt->sys_regs, APIA);
378 __ptrauth_save_key(ctxt->sys_regs, APIB);
379 __ptrauth_save_key(ctxt->sys_regs, APDA);
380 __ptrauth_save_key(ctxt->sys_regs, APDB);
381 __ptrauth_save_key(ctxt->sys_regs, APGA);
383 vcpu_ptrauth_disable(vcpu);
387 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
389 kvm_arch_vcpu_put_fp(vcpu);
390 kvm_vcpu_put_sysregs(vcpu);
391 kvm_timer_vcpu_put(vcpu);
392 kvm_vgic_put(vcpu);
393 kvm_vcpu_pmu_restore_host(vcpu);
395 vcpu->cpu = -1;
398 static void vcpu_power_off(struct kvm_vcpu *vcpu)
400 vcpu->arch.power_off = true;
401 kvm_make_request(KVM_REQ_SLEEP, vcpu);
402 kvm_vcpu_kick(vcpu);
405 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
406 struct kvm_mp_state *mp_state)
408 if (vcpu->arch.power_off)
409 mp_state->mp_state = KVM_MP_STATE_STOPPED;
410 else
411 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
413 return 0;
416 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
417 struct kvm_mp_state *mp_state)
419 int ret = 0;
421 switch (mp_state->mp_state) {
422 case KVM_MP_STATE_RUNNABLE:
423 vcpu->arch.power_off = false;
424 break;
425 case KVM_MP_STATE_STOPPED:
426 vcpu_power_off(vcpu);
427 break;
428 default:
429 ret = -EINVAL;
432 return ret;
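Editor's note: the two handlers above implement KVM_GET_MP_STATE/KVM_SET_MP_STATE, mapping KVM_MP_STATE_STOPPED onto vcpu->arch.power_off. A minimal sketch of parking a vCPU from userspace; vcpu_fd is an assumption (from KVM_CREATE_VCPU).

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Park a vCPU: it will sleep in the kernel until set RUNNABLE again. */
static int stop_vcpu(int vcpu_fd)
{
        struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };

        return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
}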
435 /**
436 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
437 * @v: The VCPU pointer
438 *
439 * If the guest CPU is not waiting for interrupts or an interrupt line is
440 * asserted, the CPU is by definition runnable.
441 */
442 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
444 bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
445 return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
446 && !v->arch.power_off && !v->arch.pause);
449 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
451 return vcpu_mode_priv(vcpu);
454 /* Just ensure a guest exit from a particular CPU */
455 static void exit_vm_noop(void *info)
459 void force_vm_exit(const cpumask_t *mask)
461 preempt_disable();
462 smp_call_function_many(mask, exit_vm_noop, NULL, true);
463 preempt_enable();
466 /**
467 * need_new_vmid_gen - check that the VMID is still valid
468 * @vmid: The VMID to check
469 *
470 * return true if there is a new generation of VMIDs being used
471 *
472 * The hardware supports a limited set of values with the value zero reserved
473 * for the host, so we check if an assigned value belongs to a previous
474 * generation, which requires us to assign a new value. If we're the
475 * first to use a VMID for the new generation, we must flush necessary caches
476 * and TLBs on all CPUs.
477 */
478 static bool need_new_vmid_gen(struct kvm_vmid *vmid)
480 u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
481 smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
482 return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
485 /**
486 * update_vmid - Update the vmid with a valid VMID for the current generation
487 * @kvm: The guest that struct vmid belongs to
488 * @vmid: The stage-2 VMID information struct
489 */
490 static void update_vmid(struct kvm_vmid *vmid)
492 if (!need_new_vmid_gen(vmid))
493 return;
495 spin_lock(&kvm_vmid_lock);
498 * We need to re-check the vmid_gen here to ensure that if another vcpu
499 * already allocated a valid vmid for this vm, then this vcpu should
500 * use the same vmid.
502 if (!need_new_vmid_gen(vmid)) {
503 spin_unlock(&kvm_vmid_lock);
504 return;
507 /* First user of a new VMID generation? */
508 if (unlikely(kvm_next_vmid == 0)) {
509 atomic64_inc(&kvm_vmid_gen);
510 kvm_next_vmid = 1;
513 * On SMP we know no other CPUs can use this CPU's or each
514 * other's VMID after force_vm_exit returns since the
515 * kvm_vmid_lock blocks them from reentry to the guest.
517 force_vm_exit(cpu_all_mask);
519 * Now broadcast TLB + ICACHE invalidation over the inner
520 * shareable domain to make sure all data structures are
521 * clean.
523 kvm_call_hyp(__kvm_flush_vm_context);
526 vmid->vmid = kvm_next_vmid;
527 kvm_next_vmid++;
528 kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
530 smp_wmb();
531 WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));
533 spin_unlock(&kvm_vmid_lock);
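Editor's note: the masking at line 528 is what eventually forces a generation bump. A tiny illustrative sketch of that arithmetic (standalone, not part of this file): with vmid_bits VMID bits, value 0 is reserved for the host, so once kvm_next_vmid wraps back to 0 the code above increments kvm_vmid_gen, forces every CPU out of guest mode and flushes TLBs, ensuring no stale translations tagged with a recycled VMID survive.

#include <stdio.h>

/* Illustrative only: mirrors "kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1". */
static unsigned int next_vmid(unsigned int cur, unsigned int vmid_bits)
{
        return (cur + 1) & ((1u << vmid_bits) - 1);
}

int main(void)
{
        printf("%u\n", next_vmid(255, 8));      /* prints 0: generation rollover */
        return 0;
}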
536 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
538 struct kvm *kvm = vcpu->kvm;
539 int ret = 0;
541 if (likely(vcpu->arch.has_run_once))
542 return 0;
544 if (!kvm_arm_vcpu_is_finalized(vcpu))
545 return -EPERM;
547 vcpu->arch.has_run_once = true;
549 if (likely(irqchip_in_kernel(kvm))) {
551 * Map the VGIC hardware resources before running a vcpu the
552 * first time on this VM.
554 if (unlikely(!vgic_ready(kvm))) {
555 ret = kvm_vgic_map_resources(kvm);
556 if (ret)
557 return ret;
559 } else {
561 * Tell the rest of the code that there are userspace irqchip
562 * VMs in the wild.
564 static_branch_inc(&userspace_irqchip_in_use);
567 ret = kvm_timer_enable(vcpu);
568 if (ret)
569 return ret;
571 ret = kvm_arm_pmu_v3_enable(vcpu);
573 return ret;
576 bool kvm_arch_intc_initialized(struct kvm *kvm)
578 return vgic_initialized(kvm);
581 void kvm_arm_halt_guest(struct kvm *kvm)
583 int i;
584 struct kvm_vcpu *vcpu;
586 kvm_for_each_vcpu(i, vcpu, kvm)
587 vcpu->arch.pause = true;
588 kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
591 void kvm_arm_resume_guest(struct kvm *kvm)
593 int i;
594 struct kvm_vcpu *vcpu;
596 kvm_for_each_vcpu(i, vcpu, kvm) {
597 vcpu->arch.pause = false;
598 swake_up_one(kvm_arch_vcpu_wq(vcpu));
602 static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
604 struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
606 swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
607 (!vcpu->arch.pause)));
609 if (vcpu->arch.power_off || vcpu->arch.pause) {
610 /* Awaken to handle a signal, request we sleep again later. */
611 kvm_make_request(KVM_REQ_SLEEP, vcpu);
615 * Make sure we will observe a potential reset request if we've
616 * observed a change to the power state. Pairs with the smp_wmb() in
617 * kvm_psci_vcpu_on().
619 smp_rmb();
622 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
624 return vcpu->arch.target >= 0;
627 static void check_vcpu_requests(struct kvm_vcpu *vcpu)
629 if (kvm_request_pending(vcpu)) {
630 if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
631 vcpu_req_sleep(vcpu);
633 if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
634 kvm_reset_vcpu(vcpu);
637 * Clear IRQ_PENDING requests that were made to guarantee
638 * that a VCPU sees new virtual interrupts.
640 kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
642 if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
643 kvm_update_stolen_time(vcpu);
645 if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
646 /* The distributor enable bits were changed */
647 preempt_disable();
648 vgic_v4_put(vcpu, false);
649 vgic_v4_load(vcpu);
650 preempt_enable();
655 /**
656 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
657 * @vcpu: The VCPU pointer
658 * @run: The kvm_run structure pointer used for userspace state exchange
659 *
660 * This function is called through the VCPU_RUN ioctl from user space. It
661 * will execute VM code in a loop until the time slice for the process is
662 * used up or some emulation is needed from user space, in which case the
663 * function will return with return value 0 and with the kvm_run structure
664 * filled in with the required data for the requested emulation.
665 */
666 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
668 int ret;
670 if (unlikely(!kvm_vcpu_initialized(vcpu)))
671 return -ENOEXEC;
673 ret = kvm_vcpu_first_run_init(vcpu);
674 if (ret)
675 return ret;
677 if (run->exit_reason == KVM_EXIT_MMIO) {
678 ret = kvm_handle_mmio_return(vcpu, vcpu->run);
679 if (ret)
680 return ret;
683 if (run->immediate_exit)
684 return -EINTR;
686 vcpu_load(vcpu);
688 kvm_sigset_activate(vcpu);
690 ret = 1;
691 run->exit_reason = KVM_EXIT_UNKNOWN;
692 while (ret > 0) {
694 * Check conditions before entering the guest
696 cond_resched();
698 update_vmid(&vcpu->kvm->arch.vmid);
700 check_vcpu_requests(vcpu);
703 * Preparing the interrupts to be injected also
704 * involves poking the GIC, which must be done in a
705 * non-preemptible context.
707 preempt_disable();
709 kvm_pmu_flush_hwstate(vcpu);
711 local_irq_disable();
713 kvm_vgic_flush_hwstate(vcpu);
716 * Exit if we have a signal pending so that we can deliver the
717 * signal to user space.
719 if (signal_pending(current)) {
720 ret = -EINTR;
721 run->exit_reason = KVM_EXIT_INTR;
725 * If we're using a userspace irqchip, then check if we need
726 * to tell a userspace irqchip about timer or PMU level
727 * changes and if so, exit to userspace (the actual level
728 * state gets updated in kvm_timer_update_run and
729 * kvm_pmu_update_run below).
731 if (static_branch_unlikely(&userspace_irqchip_in_use)) {
732 if (kvm_timer_should_notify_user(vcpu) ||
733 kvm_pmu_should_notify_user(vcpu)) {
734 ret = -EINTR;
735 run->exit_reason = KVM_EXIT_INTR;
740 * Ensure we set mode to IN_GUEST_MODE after we disable
741 * interrupts and before the final VCPU requests check.
742 * See the comment in kvm_vcpu_exiting_guest_mode() and
743 * Documentation/virt/kvm/vcpu-requests.rst
745 smp_store_mb(vcpu->mode, IN_GUEST_MODE);
747 if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
748 kvm_request_pending(vcpu)) {
749 vcpu->mode = OUTSIDE_GUEST_MODE;
750 isb(); /* Ensure work in x_flush_hwstate is committed */
751 kvm_pmu_sync_hwstate(vcpu);
752 if (static_branch_unlikely(&userspace_irqchip_in_use))
753 kvm_timer_sync_hwstate(vcpu);
754 kvm_vgic_sync_hwstate(vcpu);
755 local_irq_enable();
756 preempt_enable();
757 continue;
760 kvm_arm_setup_debug(vcpu);
762 /**************************************************************
763 * Enter the guest
765 trace_kvm_entry(*vcpu_pc(vcpu));
766 guest_enter_irqoff();
768 if (has_vhe()) {
769 ret = kvm_vcpu_run_vhe(vcpu);
770 } else {
771 ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
774 vcpu->mode = OUTSIDE_GUEST_MODE;
775 vcpu->stat.exits++;
777 * Back from guest
778 *************************************************************/
780 kvm_arm_clear_debug(vcpu);
783 * We must sync the PMU state before the vgic state so
784 * that the vgic can properly sample the updated state of the
785 * interrupt line.
787 kvm_pmu_sync_hwstate(vcpu);
790 * Sync the vgic state before syncing the timer state because
791 * the timer code needs to know if the virtual timer
792 * interrupts are active.
794 kvm_vgic_sync_hwstate(vcpu);
797 * Sync the timer hardware state before enabling interrupts as
798 * we don't want vtimer interrupts to race with syncing the
799 * timer virtual interrupt state.
801 if (static_branch_unlikely(&userspace_irqchip_in_use))
802 kvm_timer_sync_hwstate(vcpu);
804 kvm_arch_vcpu_ctxsync_fp(vcpu);
807 * We may have taken a host interrupt in HYP mode (ie
808 * while executing the guest). This interrupt is still
809 * pending, as we haven't serviced it yet!
811 * We're now back in SVC mode, with interrupts
812 * disabled. Enabling the interrupts now will have
813 * the effect of taking the interrupt again, in SVC
814 * mode this time.
816 local_irq_enable();
819 * We do local_irq_enable() before calling guest_exit() so
820 * that if a timer interrupt hits while running the guest we
821 * account that tick as being spent in the guest. We enable
822 * preemption after calling guest_exit() so that if we get
823 * preempted we make sure ticks after that are not counted as
824 * guest time.
826 guest_exit();
827 trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
829 /* Exit types that need handling before we can be preempted */
830 handle_exit_early(vcpu, run, ret);
832 preempt_enable();
834 ret = handle_exit(vcpu, run, ret);
837 /* Tell userspace about in-kernel device output levels */
838 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
839 kvm_timer_update_run(vcpu);
840 kvm_pmu_update_run(vcpu);
843 kvm_sigset_deactivate(vcpu);
845 vcpu_put(vcpu);
846 return ret;
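Editor's note: the function above is the kernel half of the run loop. Below is a hedged sketch of the matching userspace side; vcpu_fd and mmap_size (the latter from KVM_GET_VCPU_MMAP_SIZE on /dev/kvm) are assumptions supplied by the VMM, and the exit handling is deliberately minimal.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stddef.h>

static int run_vcpu(int vcpu_fd, size_t mmap_size)
{
        struct kvm_run *run;

        run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   vcpu_fd, 0);
        if (run == MAP_FAILED)
                return -1;

        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                        return -1;              /* -EINTR surfaces here as errno */

                switch (run->exit_reason) {
                case KVM_EXIT_MMIO:
                        /* emulate the access, then re-enter so that
                         * kvm_handle_mmio_return() can complete it */
                        break;
                case KVM_EXIT_INTR:
                        break;                  /* signal pending; just retry */
                default:
                        return 0;               /* hand anything else to the VMM */
                }
        }
}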
849 static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
851 int bit_index;
852 bool set;
853 unsigned long *hcr;
855 if (number == KVM_ARM_IRQ_CPU_IRQ)
856 bit_index = __ffs(HCR_VI);
857 else /* KVM_ARM_IRQ_CPU_FIQ */
858 bit_index = __ffs(HCR_VF);
860 hcr = vcpu_hcr(vcpu);
861 if (level)
862 set = test_and_set_bit(bit_index, hcr);
863 else
864 set = test_and_clear_bit(bit_index, hcr);
867 * If we didn't change anything, no need to wake up or kick other CPUs
869 if (set == level)
870 return 0;
873 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
874 * trigger a world-switch round on the running physical CPU to set the
875 * virtual IRQ/FIQ fields in the HCR appropriately.
877 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
878 kvm_vcpu_kick(vcpu);
880 return 0;
883 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
884 bool line_status)
886 u32 irq = irq_level->irq;
887 unsigned int irq_type, vcpu_idx, irq_num;
888 int nrcpus = atomic_read(&kvm->online_vcpus);
889 struct kvm_vcpu *vcpu = NULL;
890 bool level = irq_level->level;
892 irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
893 vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
894 vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
895 irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
897 trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
899 switch (irq_type) {
900 case KVM_ARM_IRQ_TYPE_CPU:
901 if (irqchip_in_kernel(kvm))
902 return -ENXIO;
904 if (vcpu_idx >= nrcpus)
905 return -EINVAL;
907 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
908 if (!vcpu)
909 return -EINVAL;
911 if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
912 return -EINVAL;
914 return vcpu_interrupt_line(vcpu, irq_num, level);
915 case KVM_ARM_IRQ_TYPE_PPI:
916 if (!irqchip_in_kernel(kvm))
917 return -ENXIO;
919 if (vcpu_idx >= nrcpus)
920 return -EINVAL;
922 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
923 if (!vcpu)
924 return -EINVAL;
926 if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
927 return -EINVAL;
929 return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
930 case KVM_ARM_IRQ_TYPE_SPI:
931 if (!irqchip_in_kernel(kvm))
932 return -ENXIO;
934 if (irq_num < VGIC_NR_PRIVATE_IRQS)
935 return -EINVAL;
937 return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
940 return -EINVAL;
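Editor's note: kvm_vm_ioctl_irq_line() above decodes the irq field of KVM_IRQ_LINE into a type, a vCPU index and an interrupt number. A hedged sketch of injecting a shared peripheral interrupt (SPI) from userspace; vm_fd and spi are assumptions supplied by the VMM, and SPIs use the full interrupt ID (32 or higher).

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_spi(int vm_fd, unsigned int spi, int level)
{
        struct kvm_irq_level irq_level = {
                .irq   = (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) |
                         (spi << KVM_ARM_IRQ_NUM_SHIFT),
                .level = level,
        };

        return ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);
}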
943 static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
944 const struct kvm_vcpu_init *init)
946 unsigned int i, ret;
947 int phys_target = kvm_target_cpu();
949 if (init->target != phys_target)
950 return -EINVAL;
953 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
954 * use the same target.
956 if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
957 return -EINVAL;
959 /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
960 for (i = 0; i < sizeof(init->features) * 8; i++) {
961 bool set = (init->features[i / 32] & (1 << (i % 32)));
963 if (set && i >= KVM_VCPU_MAX_FEATURES)
964 return -ENOENT;
967 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
968 * use the same feature set.
970 if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
971 test_bit(i, vcpu->arch.features) != set)
972 return -EINVAL;
974 if (set)
975 set_bit(i, vcpu->arch.features);
978 vcpu->arch.target = phys_target;
980 /* Now we know what it is, we can reset it. */
981 ret = kvm_reset_vcpu(vcpu);
982 if (ret) {
983 vcpu->arch.target = -1;
984 bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
987 return ret;
990 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
991 struct kvm_vcpu_init *init)
993 int ret;
995 ret = kvm_vcpu_set_target(vcpu, init);
996 if (ret)
997 return ret;
1000 * Ensure a rebooted VM will fault in RAM pages and detect if the
1001 * guest MMU is turned off and flush the caches as needed.
1003 if (vcpu->arch.has_run_once)
1004 stage2_unmap_vm(vcpu->kvm);
1006 vcpu_reset_hcr(vcpu);
1009 * Handle the "start in power-off" case.
1011 if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
1012 vcpu_power_off(vcpu);
1013 else
1014 vcpu->arch.power_off = false;
1016 return 0;
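Editor's note: the handler above is reached from KVM_ARM_VCPU_INIT, and kvm_vcpu_set_target() rejects any target other than the host's. The usual userspace sequence is therefore to ask the VM for its preferred target first; a minimal sketch, with vm_fd and vcpu_fd as assumptions.

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int init_vcpu(int vm_fd, int vcpu_fd)
{
        struct kvm_vcpu_init init;

        if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
                return -1;

        /* optional features could be requested here, e.g. starting powered off:
         * init.features[0] |= 1u << KVM_ARM_VCPU_POWER_OFF; */

        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}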
1019 static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
1020 struct kvm_device_attr *attr)
1022 int ret = -ENXIO;
1024 switch (attr->group) {
1025 default:
1026 ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
1027 break;
1030 return ret;
1033 static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
1034 struct kvm_device_attr *attr)
1036 int ret = -ENXIO;
1038 switch (attr->group) {
1039 default:
1040 ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
1041 break;
1044 return ret;
1047 static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
1048 struct kvm_device_attr *attr)
1050 int ret = -ENXIO;
1052 switch (attr->group) {
1053 default:
1054 ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
1055 break;
1058 return ret;
1061 static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
1062 struct kvm_vcpu_events *events)
1064 memset(events, 0, sizeof(*events));
1066 return __kvm_arm_vcpu_get_events(vcpu, events);
1069 static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
1070 struct kvm_vcpu_events *events)
1072 int i;
1074 /* check whether the reserved field is zero */
1075 for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
1076 if (events->reserved[i])
1077 return -EINVAL;
1079 /* check whether the pad field is zero */
1080 for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
1081 if (events->exception.pad[i])
1082 return -EINVAL;
1084 return __kvm_arm_vcpu_set_events(vcpu, events);
1087 long kvm_arch_vcpu_ioctl(struct file *filp,
1088 unsigned int ioctl, unsigned long arg)
1090 struct kvm_vcpu *vcpu = filp->private_data;
1091 void __user *argp = (void __user *)arg;
1092 struct kvm_device_attr attr;
1093 long r;
1095 switch (ioctl) {
1096 case KVM_ARM_VCPU_INIT: {
1097 struct kvm_vcpu_init init;
1099 r = -EFAULT;
1100 if (copy_from_user(&init, argp, sizeof(init)))
1101 break;
1103 r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
1104 break;
1106 case KVM_SET_ONE_REG:
1107 case KVM_GET_ONE_REG: {
1108 struct kvm_one_reg reg;
1110 r = -ENOEXEC;
1111 if (unlikely(!kvm_vcpu_initialized(vcpu)))
1112 break;
1114 r = -EFAULT;
1115 if (copy_from_user(&reg, argp, sizeof(reg)))
1116 break;
1118 if (ioctl == KVM_SET_ONE_REG)
1119 r = kvm_arm_set_reg(vcpu, &reg);
1120 else
1121 r = kvm_arm_get_reg(vcpu, &reg);
1122 break;
1124 case KVM_GET_REG_LIST: {
1125 struct kvm_reg_list __user *user_list = argp;
1126 struct kvm_reg_list reg_list;
1127 unsigned n;
1129 r = -ENOEXEC;
1130 if (unlikely(!kvm_vcpu_initialized(vcpu)))
1131 break;
1133 r = -EPERM;
1134 if (!kvm_arm_vcpu_is_finalized(vcpu))
1135 break;
1137 r = -EFAULT;
1138 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
1139 break;
1140 n = reg_list.n;
1141 reg_list.n = kvm_arm_num_regs(vcpu);
1142 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
1143 break;
1144 r = -E2BIG;
1145 if (n < reg_list.n)
1146 break;
1147 r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
1148 break;
1150 case KVM_SET_DEVICE_ATTR: {
1151 r = -EFAULT;
1152 if (copy_from_user(&attr, argp, sizeof(attr)))
1153 break;
1154 r = kvm_arm_vcpu_set_attr(vcpu, &attr);
1155 break;
1157 case KVM_GET_DEVICE_ATTR: {
1158 r = -EFAULT;
1159 if (copy_from_user(&attr, argp, sizeof(attr)))
1160 break;
1161 r = kvm_arm_vcpu_get_attr(vcpu, &attr);
1162 break;
1164 case KVM_HAS_DEVICE_ATTR: {
1165 r = -EFAULT;
1166 if (copy_from_user(&attr, argp, sizeof(attr)))
1167 break;
1168 r = kvm_arm_vcpu_has_attr(vcpu, &attr);
1169 break;
1171 case KVM_GET_VCPU_EVENTS: {
1172 struct kvm_vcpu_events events;
1174 if (kvm_arm_vcpu_get_events(vcpu, &events))
1175 return -EINVAL;
1177 if (copy_to_user(argp, &events, sizeof(events)))
1178 return -EFAULT;
1180 return 0;
1182 case KVM_SET_VCPU_EVENTS: {
1183 struct kvm_vcpu_events events;
1185 if (copy_from_user(&events, argp, sizeof(events)))
1186 return -EFAULT;
1188 return kvm_arm_vcpu_set_events(vcpu, &events);
1190 case KVM_ARM_VCPU_FINALIZE: {
1191 int what;
1193 if (!kvm_vcpu_initialized(vcpu))
1194 return -ENOEXEC;
1196 if (get_user(what, (const int __user *)argp))
1197 return -EFAULT;
1199 return kvm_arm_vcpu_finalize(vcpu, what);
1201 default:
1202 r = -EINVAL;
1205 return r;
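Editor's note: the KVM_GET_REG_LIST case above reports the true register count and returns -E2BIG when the caller's buffer is too small, which leads to the usual two-call pattern in userspace. A hedged sketch; vcpu_fd is an assumption.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>
#include <errno.h>

/* First call with n == 0 fails with E2BIG but reports how many indices exist;
 * the second call fills the properly sized buffer. */
static struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
        struct kvm_reg_list probe = { .n = 0 };
        struct kvm_reg_list *list;

        if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
                return NULL;

        list = malloc(sizeof(*list) + probe.n * sizeof(list->reg[0]));
        if (!list)
                return NULL;

        list->n = probe.n;
        if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
                free(list);
                return NULL;
        }
        return list;
}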
1208 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
1213 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
1214 struct kvm_memory_slot *memslot)
1216 kvm_flush_remote_tlbs(kvm);
1219 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
1220 struct kvm_arm_device_addr *dev_addr)
1222 unsigned long dev_id, type;
1224 dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
1225 KVM_ARM_DEVICE_ID_SHIFT;
1226 type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
1227 KVM_ARM_DEVICE_TYPE_SHIFT;
1229 switch (dev_id) {
1230 case KVM_ARM_DEVICE_VGIC_V2:
1231 if (!vgic_present)
1232 return -ENXIO;
1233 return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
1234 default:
1235 return -ENODEV;
1239 long kvm_arch_vm_ioctl(struct file *filp,
1240 unsigned int ioctl, unsigned long arg)
1242 struct kvm *kvm = filp->private_data;
1243 void __user *argp = (void __user *)arg;
1245 switch (ioctl) {
1246 case KVM_CREATE_IRQCHIP: {
1247 int ret;
1248 if (!vgic_present)
1249 return -ENXIO;
1250 mutex_lock(&kvm->lock);
1251 ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
1252 mutex_unlock(&kvm->lock);
1253 return ret;
1255 case KVM_ARM_SET_DEVICE_ADDR: {
1256 struct kvm_arm_device_addr dev_addr;
1258 if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
1259 return -EFAULT;
1260 return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
1262 case KVM_ARM_PREFERRED_TARGET: {
1263 int err;
1264 struct kvm_vcpu_init init;
1266 err = kvm_vcpu_preferred_target(&init);
1267 if (err)
1268 return err;
1270 if (copy_to_user(argp, &init, sizeof(init)))
1271 return -EFAULT;
1273 return 0;
1275 default:
1276 return -EINVAL;
1280 static void cpu_init_hyp_mode(void)
1282 phys_addr_t pgd_ptr;
1283 unsigned long hyp_stack_ptr;
1284 unsigned long stack_page;
1285 unsigned long vector_ptr;
1287 /* Switch from the HYP stub to our own HYP init vector */
1288 __hyp_set_vectors(kvm_get_idmap_vector());
1290 pgd_ptr = kvm_mmu_get_httbr();
1291 stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
1292 hyp_stack_ptr = stack_page + PAGE_SIZE;
1293 vector_ptr = (unsigned long)kvm_get_hyp_vector();
1295 __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
1296 __cpu_init_stage2();
1299 static void cpu_hyp_reset(void)
1301 if (!is_kernel_in_hyp_mode())
1302 __hyp_reset_vectors();
1305 static void cpu_hyp_reinit(void)
1307 kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);
1309 cpu_hyp_reset();
1311 if (is_kernel_in_hyp_mode())
1312 kvm_timer_init_vhe();
1313 else
1314 cpu_init_hyp_mode();
1316 kvm_arm_init_debug();
1318 if (vgic_present)
1319 kvm_vgic_init_cpu_hardware();
1322 static void _kvm_arch_hardware_enable(void *discard)
1324 if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
1325 cpu_hyp_reinit();
1326 __this_cpu_write(kvm_arm_hardware_enabled, 1);
1330 int kvm_arch_hardware_enable(void)
1332 _kvm_arch_hardware_enable(NULL);
1333 return 0;
1336 static void _kvm_arch_hardware_disable(void *discard)
1338 if (__this_cpu_read(kvm_arm_hardware_enabled)) {
1339 cpu_hyp_reset();
1340 __this_cpu_write(kvm_arm_hardware_enabled, 0);
1344 void kvm_arch_hardware_disable(void)
1346 _kvm_arch_hardware_disable(NULL);
1349 #ifdef CONFIG_CPU_PM
1350 static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
1351 unsigned long cmd,
1352 void *v)
1355 * kvm_arm_hardware_enabled is left with its old value over
1356 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
1357 * re-enable hyp.
1359 switch (cmd) {
1360 case CPU_PM_ENTER:
1361 if (__this_cpu_read(kvm_arm_hardware_enabled))
1363 * don't update kvm_arm_hardware_enabled here
1364 * so that the hardware will be re-enabled
1365 * when we resume. See below.
1367 cpu_hyp_reset();
1369 return NOTIFY_OK;
1370 case CPU_PM_ENTER_FAILED:
1371 case CPU_PM_EXIT:
1372 if (__this_cpu_read(kvm_arm_hardware_enabled))
1373 /* The hardware was enabled before suspend. */
1374 cpu_hyp_reinit();
1376 return NOTIFY_OK;
1378 default:
1379 return NOTIFY_DONE;
1383 static struct notifier_block hyp_init_cpu_pm_nb = {
1384 .notifier_call = hyp_init_cpu_pm_notifier,
1387 static void __init hyp_cpu_pm_init(void)
1389 cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
1391 static void __init hyp_cpu_pm_exit(void)
1393 cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
1395 #else
1396 static inline void hyp_cpu_pm_init(void)
1399 static inline void hyp_cpu_pm_exit(void)
1402 #endif
1404 static int init_common_resources(void)
1406 kvm_set_ipa_limit();
1408 return 0;
1411 static int init_subsystems(void)
1413 int err = 0;
1416 * Enable hardware so that subsystem initialisation can access EL2.
1418 on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
1421 * Register CPU low-power notifier
1423 hyp_cpu_pm_init();
1426 * Init HYP view of VGIC
1428 err = kvm_vgic_hyp_init();
1429 switch (err) {
1430 case 0:
1431 vgic_present = true;
1432 break;
1433 case -ENODEV:
1434 case -ENXIO:
1435 vgic_present = false;
1436 err = 0;
1437 break;
1438 default:
1439 goto out;
1443 * Init HYP architected timer support
1445 err = kvm_timer_hyp_init(vgic_present);
1446 if (err)
1447 goto out;
1449 kvm_perf_init();
1450 kvm_coproc_table_init();
1452 out:
1453 on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
1455 return err;
1458 static void teardown_hyp_mode(void)
1460 int cpu;
1462 free_hyp_pgds();
1463 for_each_possible_cpu(cpu)
1464 free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
1467 /**
1468 * Inits Hyp-mode on all online CPUs
1469 */
1470 static int init_hyp_mode(void)
1472 int cpu;
1473 int err = 0;
1476 * Allocate Hyp PGD and setup Hyp identity mapping
1478 err = kvm_mmu_init();
1479 if (err)
1480 goto out_err;
1483 * Allocate stack pages for Hypervisor-mode
1485 for_each_possible_cpu(cpu) {
1486 unsigned long stack_page;
1488 stack_page = __get_free_page(GFP_KERNEL);
1489 if (!stack_page) {
1490 err = -ENOMEM;
1491 goto out_err;
1494 per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
1498 * Map the Hyp-code called directly from the host
1500 err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
1501 kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
1502 if (err) {
1503 kvm_err("Cannot map world-switch code\n");
1504 goto out_err;
1507 err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
1508 kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
1509 if (err) {
1510 kvm_err("Cannot map rodata section\n");
1511 goto out_err;
1514 err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
1515 kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
1516 if (err) {
1517 kvm_err("Cannot map bss section\n");
1518 goto out_err;
1521 err = kvm_map_vectors();
1522 if (err) {
1523 kvm_err("Cannot map vectors\n");
1524 goto out_err;
1528 * Map the Hyp stack pages
1530 for_each_possible_cpu(cpu) {
1531 char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
1532 err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
1533 PAGE_HYP);
1535 if (err) {
1536 kvm_err("Cannot map hyp stack\n");
1537 goto out_err;
1541 for_each_possible_cpu(cpu) {
1542 kvm_host_data_t *cpu_data;
1544 cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
1545 err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
1547 if (err) {
1548 kvm_err("Cannot map host CPU state: %d\n", err);
1549 goto out_err;
1553 err = hyp_map_aux_data();
1554 if (err)
1555 kvm_err("Cannot map host auxiliary data: %d\n", err);
1557 return 0;
1559 out_err:
1560 teardown_hyp_mode();
1561 kvm_err("error initializing Hyp mode: %d\n", err);
1562 return err;
1565 static void check_kvm_target_cpu(void *ret)
1567 *(int *)ret = kvm_target_cpu();
1570 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
1572 struct kvm_vcpu *vcpu;
1573 int i;
1575 mpidr &= MPIDR_HWID_BITMASK;
1576 kvm_for_each_vcpu(i, vcpu, kvm) {
1577 if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
1578 return vcpu;
1580 return NULL;
1583 bool kvm_arch_has_irq_bypass(void)
1585 return true;
1588 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
1589 struct irq_bypass_producer *prod)
1591 struct kvm_kernel_irqfd *irqfd =
1592 container_of(cons, struct kvm_kernel_irqfd, consumer);
1594 return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
1595 &irqfd->irq_entry);
1597 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
1598 struct irq_bypass_producer *prod)
1600 struct kvm_kernel_irqfd *irqfd =
1601 container_of(cons, struct kvm_kernel_irqfd, consumer);
1603 kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
1604 &irqfd->irq_entry);
1607 void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
1609 struct kvm_kernel_irqfd *irqfd =
1610 container_of(cons, struct kvm_kernel_irqfd, consumer);
1612 kvm_arm_halt_guest(irqfd->kvm);
1615 void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
1617 struct kvm_kernel_irqfd *irqfd =
1618 container_of(cons, struct kvm_kernel_irqfd, consumer);
1620 kvm_arm_resume_guest(irqfd->kvm);
1623 /**
1624 * Initialize Hyp-mode and memory mappings on all CPUs.
1625 */
1626 int kvm_arch_init(void *opaque)
1628 int err;
1629 int ret, cpu;
1630 bool in_hyp_mode;
1632 if (!is_hyp_mode_available()) {
1633 kvm_info("HYP mode not available\n");
1634 return -ENODEV;
1637 in_hyp_mode = is_kernel_in_hyp_mode();
1639 if (!in_hyp_mode && kvm_arch_requires_vhe()) {
1640 kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
1641 return -ENODEV;
1644 for_each_online_cpu(cpu) {
1645 smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
1646 if (ret < 0) {
1647 kvm_err("Error, CPU %d not supported!\n", cpu);
1648 return -ENODEV;
1652 err = init_common_resources();
1653 if (err)
1654 return err;
1656 err = kvm_arm_init_sve();
1657 if (err)
1658 return err;
1660 if (!in_hyp_mode) {
1661 err = init_hyp_mode();
1662 if (err)
1663 goto out_err;
1666 err = init_subsystems();
1667 if (err)
1668 goto out_hyp;
1670 if (in_hyp_mode)
1671 kvm_info("VHE mode initialized successfully\n");
1672 else
1673 kvm_info("Hyp mode initialized successfully\n");
1675 return 0;
1677 out_hyp:
1678 hyp_cpu_pm_exit();
1679 if (!in_hyp_mode)
1680 teardown_hyp_mode();
1681 out_err:
1682 return err;
1685 /* NOP: Compiling as a module not supported */
1686 void kvm_arch_exit(void)
1688 kvm_perf_teardown();
1691 static int arm_init(void)
1693 int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1694 return rc;
1697 module_init(arm_init);