/* SPDX-License-Identifier: GPL-2.0-only */
3 #ifndef __KVM_TYPES_H__
4 #define __KVM_TYPES_H__
/*
 * Forward declarations: this header only ever refers to these types by
 * pointer, so the full definitions are not needed here.
 */
struct kvm_irq_routing_table;
struct kvm_memory_slot;
struct kvm_userspace_memory_region;
22 #include <linux/bits.h>
23 #include <linux/mutex.h>
24 #include <linux/types.h>
25 #include <linux/spinlock_types.h>
27 #include <asm/kvm_types.h>
/*
 * gva - guest virtual address
 * gpa - guest physical address
 * gfn - guest frame number
 * hva - host virtual address
 * hpa - host physical address
 * hfn - host frame number
 */
40 typedef unsigned long gva_t
;
44 #define INVALID_GPA (~(gpa_t)0)
46 typedef unsigned long hva_t
;
50 typedef hfn_t kvm_pfn_t
;
/*
 * NOTE(review): this struct definition is truncated by extraction -- the
 * members between the opening brace and @memslot (original lines 53-56) and
 * the closing brace are missing, and stray leading numbers are a line-number
 * gutter fused into the text. Recover the full definition before compiling.
 */
52 struct gfn_to_hva_cache
{
/* Memslot associated with the cached translation -- name-based assumption, confirm. */
57 struct kvm_memory_slot
*memslot
;
/*
 * NOTE(review): truncated definition -- members are missing between the
 * visible fields (original line numbers jump 60->64, 64->66, 66->68) and the
 * closing brace is absent; stray leading numbers are an extraction artifact.
 * Recover the full definition before compiling.
 */
60 struct gfn_to_pfn_cache
{
/* Memslot associated with the cached mapping -- name-based assumption, confirm. */
64 struct kvm_memory_slot
*memslot
;
/* Linkage node, presumably into a per-VM list of caches -- confirm against users. */
66 struct list_head list
;
/* Mutex whose name suggests it serializes cache refreshes -- confirm against users. */
68 struct mutex refresh_lock
;
75 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
 * Memory caches are used to preallocate memory ahead of various MMU flows,
 * e.g. page fault handlers. Gracefully handling allocation failures deep in
 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
 * holding MMU locks. Note, these caches act more like prefetch buffers than
 * classical caches, i.e. objects are not returned to the cache on being freed.
 *
 * The @capacity field and @objects array are lazily initialized when the cache
 * is topped up (__kvm_mmu_topup_memory_cache()).
 */
/*
 * NOTE(review): truncated -- only the @kmem_cache member survived
 * extraction. The preceding comment refers to @capacity and @objects
 * members which are not visible here, and the closing brace is missing;
 * stray leading numbers are a line-number gutter fused into the text.
 * Recover the full definition before compiling.
 */
86 struct kvm_mmu_memory_cache
{
/* Slab cache the preallocated objects come from -- exact semantics not visible here. */
90 struct kmem_cache
*kmem_cache
;
/* Number of buckets in the halt-poll latency histogram arrays. */
#define HALT_POLL_HIST_COUNT 32
/*
 * Per-VM statistics counters (the name suggests these are arch-generic).
 * NOTE(review): the closing brace is missing and stray leading numbers are
 * an extraction line-number gutter fused into the text -- recover the full
 * definition before compiling.
 */
99 struct kvm_vm_stat_generic
{
100 u64 remote_tlb_flush
;
101 u64 remote_tlb_flush_requests
;
/*
 * Per-vCPU statistics counters (the name suggests these are arch-generic).
 * NOTE(review): truncated -- the embedded original line numbers jump
 * 107->109 and 110->112, so members were dropped by extraction, and the
 * closing brace is missing; stray leading numbers are a line-number gutter
 * fused into the text. Recover the full definition before compiling.
 */
104 struct kvm_vcpu_stat_generic
{
105 u64 halt_successful_poll
;
106 u64 halt_attempted_poll
;
107 u64 halt_poll_invalid
;
109 u64 halt_poll_success_ns
;
110 u64 halt_poll_fail_ns
;
/* Histograms below each have HALT_POLL_HIST_COUNT buckets. */
112 u64 halt_poll_success_hist
[HALT_POLL_HIST_COUNT
];
113 u64 halt_poll_fail_hist
[HALT_POLL_HIST_COUNT
];
114 u64 halt_wait_hist
[HALT_POLL_HIST_COUNT
];
/*
 * Buffer size for KVM stats descriptor names (presumably includes the NUL
 * terminator -- confirm against the users of this constant).
 */
#define KVM_STATS_NAME_SIZE 48
120 #endif /* __KVM_TYPES_H__ */