#ifndef KVM_DIRTY_RING_H
#define KVM_DIRTY_RING_H

#include <linux/kvm.h>

/**
 * kvm_dirty_ring: KVM internal dirty ring structure
 *
 * @dirty_index: free running counter that points to the next slot in
 *               dirty_ring->dirty_gfns, where a new dirty page should go
 * @reset_index: free running counter that points to the next dirty page
 *               in dirty_ring->dirty_gfns for which dirty trap needs to
 *               be reenabled
 * @size:        size of the compact list, dirty_ring->dirty_gfns
 * @soft_limit:  when the number of dirty pages in the list reaches this
 *               limit, vcpu that owns this ring should exit to userspace
 *               to allow userspace to harvest all the dirty pages
 * @dirty_gfns:  the array to keep the dirty gfns
 * @index:       index of this dirty ring
 */
struct kvm_dirty_ring {
	u32 dirty_index;
	u32 reset_index;
	u32 size;
	u32 soft_limit;
	struct kvm_dirty_gfn *dirty_gfns;
	int index;
};
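
/*
 * Illustrative helpers (a sketch, not part of this header upstream): the two
 * free-running counters are meant to be combined like this to tell how many
 * entries are still unharvested and whether the soft limit has been reached.
 * Comparable helpers live in virt/kvm/dirty_ring.c; the "_sketch" names
 * below are hypothetical and only for illustration.
 */
static inline u32 kvm_dirty_ring_used_sketch(struct kvm_dirty_ring *ring)
{
	/* Entries pushed by the vcpu but not yet reset by userspace. */
	return ring->dirty_index - ring->reset_index;
}

static inline bool kvm_dirty_ring_soft_full_sketch(struct kvm_dirty_ring *ring)
{
	/* Past this point the owning vcpu should exit to userspace. */
	return kvm_dirty_ring_used_sketch(ring) >= ring->soft_limit;
}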

#ifndef CONFIG_HAVE_KVM_DIRTY_RING
/*
 * If CONFIG_HAVE_KVM_DIRTY_RING not defined, kvm_dirty_ring.o should
 * not be included as well, so define these nop functions for the arch.
 */
static inline u32 kvm_dirty_ring_get_rsvd_entries(void)
{
	return 0;
}

static inline bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
	return true;
}

static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring,
				       int index, u32 size)
{
	return 0;
}

static inline int kvm_dirty_ring_reset(struct kvm *kvm,
				       struct kvm_dirty_ring *ring)
{
	return 0;
}

static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu,
				       u32 slot, u64 offset)
{
}

static inline struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring,
						   u32 offset)
{
	return NULL;
}

static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
}

#else /* CONFIG_HAVE_KVM_DIRTY_RING */

int kvm_cpu_dirty_log_size(void);
bool kvm_use_dirty_bitmap(struct kvm *kvm);
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm);
u32 kvm_dirty_ring_get_rsvd_entries(void);
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);

/*
 * called with kvm->slots_lock held, returns the number of
 * processed pages.
 */
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);
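
/*
 * Illustrative usage (an assumption, modelled on the KVM_RESET_DIRTY_RINGS
 * handling in virt/kvm/kvm_main.c): the caller walks every vcpu's ring under
 * kvm->slots_lock and flushes remote TLBs when anything was cleared:
 *
 *	mutex_lock(&kvm->slots_lock);
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
 *	mutex_unlock(&kvm->slots_lock);
 *	if (cleared)
 *		kvm_flush_remote_tlbs(kvm);
 */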

/*
 * returns =0: successfully pushed
 *         <0: unable to push, need to wait
 */
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset);
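
/*
 * Illustrative sketch (an assumption, modelled on virt/kvm/dirty_ring.c): a
 * push fills the next free entry of the ring; the ring size is a power of
 * two, so masking the free-running dirty_index picks the slot to use.  The
 * "_sketch" name is hypothetical and only for illustration.
 */
static inline void kvm_dirty_ring_push_sketch(struct kvm_dirty_ring *ring,
					      u32 slot, u64 offset)
{
	struct kvm_dirty_gfn *entry;

	entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];
	entry->slot = slot;
	entry->offset = offset;
	/*
	 * The real code also marks the entry dirty (KVM_DIRTY_GFN_F_DIRTY)
	 * behind a write barrier before publishing it to userspace.
	 */
	ring->dirty_index++;
}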

bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);
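
/*
 * Illustrative usage (an assumption, modelled on the arch vcpu run loops):
 * call this before entering the guest; when it returns true the exit reason
 * has already been set to KVM_EXIT_DIRTY_RING_FULL and the vcpu should go
 * back to userspace so the ring can be harvested:
 *
 *	if (kvm_dirty_ring_check_request(vcpu))
 *		return 0;
 */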

/* for use in vm_operations_struct */
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);
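
/*
 * Illustrative usage (an assumption, modelled on the vcpu mmap fault path in
 * virt/kvm/kvm_main.c): a vm_operations_struct .fault handler backs the
 * mmap()ed ring with the page for the faulting offset:
 *
 *	page = kvm_dirty_ring_get_page(&vcpu->dirty_ring, offset);
 *	get_page(page);
 *	vmf->page = page;
 *	return 0;
 */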

void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);

#endif /* CONFIG_HAVE_KVM_DIRTY_RING */

#endif /* KVM_DIRTY_RING_H */