/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__
#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cputype.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/fpstate.h>
#include <kvm/arm_arch_timer.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 32
#define KVM_HAVE_ONE_REG
#define KVM_HALT_POLL_NS_DEFAULT 500000

#define KVM_VCPU_MAX_FEATURES 2

#include <kvm/arm_vgic.h>

#ifdef CONFIG_ARM_GIC_V3
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
#else
#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
#endif
#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
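
/*
 * These requests use the generic vcpu request mechanism from
 * <linux/kvm_host.h>. As an illustrative sketch (not a definitive
 * recipe), a producer posts a request and kicks the vcpu, and the run
 * loop consumes it before entering the guest:
 *
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
 *		vcpu_req_sleep(vcpu);
 *
 * where vcpu_req_sleep() stands in for whatever handler the run loop uses.
 */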

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

static inline int kvm_arm_init_sve(void) { return 0; }

u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;
};

struct kvm_arch {
	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/* The VMID generation used for the virt. memory system */
	struct kvm_vmid vmid;

	/* Stage-2 page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Interrupt controller */
	struct vgic_dist vgic;
	int max_vcpus;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space.  User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;
};
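
/*
 * Illustrative userspace sketch (fd setup and error handling elided):
 * return_nisv_io_abort_to_user gets set when the VMM enables the
 * capability on the VM file descriptor:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_NISV_TO_USER };
 *
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */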

#define KVM_NR_MEM_OBJS     40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
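
/*
 * A minimal sketch of the intended pattern (helper name illustrative;
 * the real consumers live in the KVM/ARM mmu code): the cache is topped
 * up while sleeping is still allowed, then objects are popped with the
 * mmu_lock spinlock held, so the fault path never sees an allocation
 * failure:
 *
 *	static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 *	{
 *		BUG_ON(!mc->nobjs);
 *		return mc->objects[--mc->nobjs];
 *	}
 */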

struct kvm_vcpu_fault_info {
	u32 hsr;		/* Hyp Syndrome Register */
	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
	u32 hpfar;		/* Hyp IPA Fault Address Register */
};

/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum {
	c0_MPIDR = 1,		/* MultiProcessor ID Register */
	c0_CSSELR,		/* Cache Size Selection Register */
	c1_SCTLR,		/* System Control Register */
	c1_ACTLR,		/* Auxiliary Control Register */
	c1_CPACR,		/* Coprocessor Access Control */
	c2_TTBR0,		/* Translation Table Base Register 0 */
	c2_TTBR0_high,		/* TTBR0 top 32 bits */
	c2_TTBR1,		/* Translation Table Base Register 1 */
	c2_TTBR1_high,		/* TTBR1 top 32 bits */
	c2_TTBCR,		/* Translation Table Base Control R. */
	c3_DACR,		/* Domain Access Control Register */
	c5_DFSR,		/* Data Fault Status Register */
	c5_IFSR,		/* Instruction Fault Status Register */
	c5_ADFSR,		/* Auxiliary Data Fault Status R */
	c5_AIFSR,		/* Auxiliary Instruction Fault Status R */
	c6_DFAR,		/* Data Fault Address Register */
	c6_IFAR,		/* Instruction Fault Address Register */
	c7_PAR,			/* Physical Address Register */
	c7_PAR_high,		/* PAR top 32 bits */
	c9_L2CTLR,		/* Cortex A15/A7 L2 Control Register */
	c10_PRRR,		/* Primary Region Remap Register */
	c10_NMRR,		/* Normal Memory Remap Register */
	c12_VBAR,		/* Vector Base Address Register */
	c13_CID,		/* Context ID Register */
	c13_TID_URW,		/* Thread ID, User R/W */
	c13_TID_URO,		/* Thread ID, User R/O */
	c13_TID_PRIV,		/* Thread ID, Privileged */
	c14_CNTKCTL,		/* Timer Control Register (PL1) */
	c10_AMAIR0,		/* Auxiliary Memory Attribute Indirection Reg0 */
	c10_AMAIR1,		/* Auxiliary Memory Attribute Indirection Reg1 */
	NR_CP15_REGS		/* Number of regs (incl. invalid) */
};

struct kvm_cpu_context {
	struct kvm_regs	gp_regs;
	struct vfp_hard_struct vfp;
	u32 cp15[NR_CP15_REGS];
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
};

typedef struct kvm_host_data kvm_host_data_t;

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	cpu_ctxt->cp15[c0_MPIDR] = read_cpuid_mpidr();
}

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	int target; /* Processor target */
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* The CPU type we expose to the VM */
	u32 midr;

	/* HYP trapping configuration */
	u32 hcr;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Host FP context */
	struct kvm_cpu_context *host_cpu_context;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	struct vcpu_reset_state reset_state;

	/* Detect first run of a vcpu */
	bool has_run_once;
};

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

#define vcpu_cp15(v,r)	(v)->arch.ctxt.cp15[r]
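
/*
 * vcpu_cp15() evaluates to an lvalue, so it works for both reads and
 * writes. A small illustrative example (not a complete emulation path),
 * reading the guest's VBAR and then clearing its CONTEXTIDR:
 *
 *	u32 vbar = vcpu_cp15(vcpu, c12_VBAR);
 *
 *	vcpu_cp15(vcpu, c13_CID) = 0;
 */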

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long __kvm_call_hyp(void *hypfn, ...);

/*
 * The has_vhe() part doesn't get emitted, but is used for type-checking.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
		} else {						\
			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
		} else {						\
			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
					     ##__VA_ARGS__);		\
		}							\
									\
		ret;							\
	})
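
/*
 * The f(__VA_ARGS__) branch is dead code on 32-bit ARM (has_vhe() is
 * always false here), but it forces the compiler to check the arguments
 * against f's real prototype before the call is routed through
 * __kvm_call_hyp(). Illustrative example, using a HYP symbol declared
 * in <asm/kvm_asm.h>:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
 *
 * Passing an argument of the wrong type fails to build even though only
 * the __kvm_call_hyp() branch can ever run.
 */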

void force_vm_exit(const cpumask_t *mask);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index);

static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
				     int exception_index) {}

void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
		 phys_addr_t fault_ipa);

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				       unsigned long hyp_stack_ptr,
				       unsigned long vector_ptr)
{
	/*
	 * Call initialization code, and switch to the full blown HYP
	 * code. The init code doesn't need to preserve these
	 * registers as r0-r3 are caller-saved according to the AAPCS.
	 * Note that we slightly misuse the prototype by casting the
	 * stack pointer to a void *.
	 *
	 * The PGDs are always passed as the third argument, in order
	 * to be passed into r2-r3 to the init code (yes, this is
	 * compliant with the PCS!).
	 */
	__kvm_call_hyp((void *)hyp_stack_ptr, vector_ptr, pgd_ptr);
}

static inline void __cpu_init_stage2(void)
{
	kvm_call_hyp(__init_stage2_translation);
}

static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	return 0;
}

int kvm_perf_init(void);
int kvm_perf_teardown(void);

static inline long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
{
	return SMCCC_RET_NOT_SUPPORTED;
}

static inline gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
{
	return GPA_INVALID;
}

static inline void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return false;
}

void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

static inline bool kvm_arch_requires_vhe(void) { return false; }
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) {}

static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

/*
 * VFP/NEON switching is all done by the hyp switch code, so no need to
 * coordinate with host context handling for this state:
 */
static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}

static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}

static inline void kvm_arm_vhe_guest_enter(void) {}
static inline void kvm_arm_vhe_guest_exit(void) {}

#define KVM_BP_HARDEN_UNKNOWN		-1
#define KVM_BP_HARDEN_WA_NEEDED		0
#define KVM_BP_HARDEN_NOT_REQUIRED	1

static inline int kvm_arm_harden_branch_predictor(void)
{
	switch(read_cpuid_part()) {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	case ARM_CPU_PART_BRAHMA_B15:
	case ARM_CPU_PART_CORTEX_A12:
	case ARM_CPU_PART_CORTEX_A15:
	case ARM_CPU_PART_CORTEX_A17:
		return KVM_BP_HARDEN_WA_NEEDED;
#endif
	case ARM_CPU_PART_CORTEX_A7:
		return KVM_BP_HARDEN_NOT_REQUIRED;
	default:
		return KVM_BP_HARDEN_UNKNOWN;
	}
}

#define KVM_SSBD_UNKNOWN		-1
#define KVM_SSBD_FORCE_DISABLE		0
#define KVM_SSBD_KERNEL			1
#define KVM_SSBD_FORCE_ENABLE		2
#define KVM_SSBD_MITIGATED		3

static inline int kvm_arm_have_ssbd(void)
{
	/* No way to detect it yet, pretend it is not there. */
	return KVM_SSBD_UNKNOWN;
}

static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	/*
	 * On 32bit ARM, VMs get a static 40bit IPA stage2 setup,
	 * so any non-zero value used as type is illegal.
	 */
	if (type)
		return -EINVAL;
	return 0;
}

static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	return -EINVAL;
}

static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	return true;
}

#endif /* __ARM_KVM_HOST_H__ */