// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support KVM guest page tracking
 *
 * This feature allows us to track page access in guest. Currently, only
 * write access is tracked.
 *
 * Copyright(C) 2015 Intel Corporation.
 *
 * Author:
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */
14 #include <linux/kvm_host.h>
15 #include <linux/rculist.h>
17 #include <asm/kvm_host.h>
18 #include <asm/kvm_page_track.h>
22 void kvm_page_track_free_memslot(struct kvm_memory_slot
*free
,
23 struct kvm_memory_slot
*dont
)
27 for (i
= 0; i
< KVM_PAGE_TRACK_MAX
; i
++)
28 if (!dont
|| free
->arch
.gfn_track
[i
] !=
29 dont
->arch
.gfn_track
[i
]) {
30 kvfree(free
->arch
.gfn_track
[i
]);
31 free
->arch
.gfn_track
[i
] = NULL
;
35 int kvm_page_track_create_memslot(struct kvm_memory_slot
*slot
,
40 for (i
= 0; i
< KVM_PAGE_TRACK_MAX
; i
++) {
41 slot
->arch
.gfn_track
[i
] =
42 kvcalloc(npages
, sizeof(*slot
->arch
.gfn_track
[i
]),
44 if (!slot
->arch
.gfn_track
[i
])
51 kvm_page_track_free_memslot(slot
, NULL
);
55 static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode
)
57 if (mode
< 0 || mode
>= KVM_PAGE_TRACK_MAX
)
63 static void update_gfn_track(struct kvm_memory_slot
*slot
, gfn_t gfn
,
64 enum kvm_page_track_mode mode
, short count
)
68 index
= gfn_to_index(gfn
, slot
->base_gfn
, PT_PAGE_TABLE_LEVEL
);
70 val
= slot
->arch
.gfn_track
[mode
][index
];
72 if (WARN_ON(val
+ count
< 0 || val
+ count
> USHRT_MAX
))
75 slot
->arch
.gfn_track
[mode
][index
] += count
;
79 * add guest page to the tracking pool so that corresponding access on that
80 * page will be intercepted.
82 * It should be called under the protection both of mmu-lock and kvm->srcu
85 * @kvm: the guest instance we are interested in.
86 * @slot: the @gfn belongs to.
87 * @gfn: the guest page.
88 * @mode: tracking mode, currently only write track is supported.
90 void kvm_slot_page_track_add_page(struct kvm
*kvm
,
91 struct kvm_memory_slot
*slot
, gfn_t gfn
,
92 enum kvm_page_track_mode mode
)
95 if (WARN_ON(!page_track_mode_is_valid(mode
)))
98 update_gfn_track(slot
, gfn
, mode
, 1);
101 * new track stops large page mapping for the
104 kvm_mmu_gfn_disallow_lpage(slot
, gfn
);
106 if (mode
== KVM_PAGE_TRACK_WRITE
)
107 if (kvm_mmu_slot_gfn_write_protect(kvm
, slot
, gfn
))
108 kvm_flush_remote_tlbs(kvm
);
110 EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page
);
113 * remove the guest page from the tracking pool which stops the interception
114 * of corresponding access on that page. It is the opposed operation of
115 * kvm_slot_page_track_add_page().
117 * It should be called under the protection both of mmu-lock and kvm->srcu
118 * or kvm->slots_lock.
120 * @kvm: the guest instance we are interested in.
121 * @slot: the @gfn belongs to.
122 * @gfn: the guest page.
123 * @mode: tracking mode, currently only write track is supported.
125 void kvm_slot_page_track_remove_page(struct kvm
*kvm
,
126 struct kvm_memory_slot
*slot
, gfn_t gfn
,
127 enum kvm_page_track_mode mode
)
129 if (WARN_ON(!page_track_mode_is_valid(mode
)))
132 update_gfn_track(slot
, gfn
, mode
, -1);
135 * allow large page mapping for the tracked page
136 * after the tracker is gone.
138 kvm_mmu_gfn_allow_lpage(slot
, gfn
);
140 EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page
);
143 * check if the corresponding access on the specified guest page is tracked.
145 bool kvm_page_track_is_active(struct kvm_vcpu
*vcpu
, gfn_t gfn
,
146 enum kvm_page_track_mode mode
)
148 struct kvm_memory_slot
*slot
;
151 if (WARN_ON(!page_track_mode_is_valid(mode
)))
154 slot
= kvm_vcpu_gfn_to_memslot(vcpu
, gfn
);
158 index
= gfn_to_index(gfn
, slot
->base_gfn
, PT_PAGE_TABLE_LEVEL
);
159 return !!READ_ONCE(slot
->arch
.gfn_track
[mode
][index
]);
162 void kvm_page_track_cleanup(struct kvm
*kvm
)
164 struct kvm_page_track_notifier_head
*head
;
166 head
= &kvm
->arch
.track_notifier_head
;
167 cleanup_srcu_struct(&head
->track_srcu
);
170 void kvm_page_track_init(struct kvm
*kvm
)
172 struct kvm_page_track_notifier_head
*head
;
174 head
= &kvm
->arch
.track_notifier_head
;
175 init_srcu_struct(&head
->track_srcu
);
176 INIT_HLIST_HEAD(&head
->track_notifier_list
);
180 * register the notifier so that event interception for the tracked guest
181 * pages can be received.
184 kvm_page_track_register_notifier(struct kvm
*kvm
,
185 struct kvm_page_track_notifier_node
*n
)
187 struct kvm_page_track_notifier_head
*head
;
189 head
= &kvm
->arch
.track_notifier_head
;
191 spin_lock(&kvm
->mmu_lock
);
192 hlist_add_head_rcu(&n
->node
, &head
->track_notifier_list
);
193 spin_unlock(&kvm
->mmu_lock
);
195 EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier
);
198 * stop receiving the event interception. It is the opposed operation of
199 * kvm_page_track_register_notifier().
202 kvm_page_track_unregister_notifier(struct kvm
*kvm
,
203 struct kvm_page_track_notifier_node
*n
)
205 struct kvm_page_track_notifier_head
*head
;
207 head
= &kvm
->arch
.track_notifier_head
;
209 spin_lock(&kvm
->mmu_lock
);
210 hlist_del_rcu(&n
->node
);
211 spin_unlock(&kvm
->mmu_lock
);
212 synchronize_srcu(&head
->track_srcu
);
214 EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier
);
217 * Notify the node that write access is intercepted and write emulation is
218 * finished at this time.
220 * The node should figure out if the written page is the one that node is
221 * interested in by itself.
223 void kvm_page_track_write(struct kvm_vcpu
*vcpu
, gpa_t gpa
, const u8
*new,
226 struct kvm_page_track_notifier_head
*head
;
227 struct kvm_page_track_notifier_node
*n
;
230 head
= &vcpu
->kvm
->arch
.track_notifier_head
;
232 if (hlist_empty(&head
->track_notifier_list
))
235 idx
= srcu_read_lock(&head
->track_srcu
);
236 hlist_for_each_entry_rcu(n
, &head
->track_notifier_list
, node
)
238 n
->track_write(vcpu
, gpa
, new, bytes
, n
);
239 srcu_read_unlock(&head
->track_srcu
, idx
);
243 * Notify the node that memory slot is being removed or moved so that it can
244 * drop write-protection for the pages in the memory slot.
246 * The node should figure out it has any write-protected pages in this slot
249 void kvm_page_track_flush_slot(struct kvm
*kvm
, struct kvm_memory_slot
*slot
)
251 struct kvm_page_track_notifier_head
*head
;
252 struct kvm_page_track_notifier_node
*n
;
255 head
= &kvm
->arch
.track_notifier_head
;
257 if (hlist_empty(&head
->track_notifier_list
))
260 idx
= srcu_read_lock(&head
->track_srcu
);
261 hlist_for_each_entry_rcu(n
, &head
->track_notifier_list
, node
)
262 if (n
->track_flush_slot
)
263 n
->track_flush_slot(kvm
, slot
, n
);
264 srcu_read_unlock(&head
->track_srcu
, idx
);