/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <gleb@redhat.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>

#include "async_pf.h"
#include <trace/events/kvm.h>
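
/*
 * Summary of the flow implemented below: when a guest access faults on a
 * page that is not resident on the host, kvm_setup_async_pf() queues a
 * work item to fault the page in, and arch code tells the guest the page
 * is "not present" so it can schedule other tasks meanwhile.
 * async_pf_execute() brings the page in from workqueue context and moves
 * the item to the vcpu's "done" list, from which
 * kvm_check_async_pf_completion() later injects "page ready" into the
 * guest.
 */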
static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
					       struct kvm_async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}
static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
						struct kvm_async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}
static struct kmem_cache *async_pf_cache;
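
/*
 * Module init/exit pair: create and destroy the slab cache backing
 * struct kvm_async_pf allocations.
 */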
int kvm_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

	if (!async_pf_cache)
		return -ENOMEM;

	return 0;
}
void kvm_async_pf_deinit(void)
{
	kmem_cache_destroy(async_pf_cache);
	async_pf_cache = NULL;
}
void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->async_pf.done);
	INIT_LIST_HEAD(&vcpu->async_pf.queue);
	spin_lock_init(&vcpu->async_pf.lock);
}
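
/*
 * Runs in workqueue context: fault the page in on behalf of the guest,
 * then move the work item to the vcpu's "done" list and wake the vcpu
 * if it is blocked on its wait queue (vcpu->wq, used by kvm_vcpu_block()).
 */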
static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct mm_struct *mm = apf->mm;
	struct kvm_vcpu *vcpu = apf->vcpu;
	unsigned long addr = apf->addr;
	gva_t gva = apf->gva;

	might_sleep();

	/*
	 * This work is run asynchronously to the task which owns
	 * mm and might be done in another context, so we must
	 * access remotely.
	 */
	__get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL,
			FOLL_REMOTE);

	kvm_async_page_present_sync(vcpu, apf);

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	/*
	 * apf may be freed by kvm_check_async_pf_completion() after
	 * this point
	 */

	trace_kvm_async_pf_completed(addr, gva);

	/*
	 * This memory barrier pairs with prepare_to_wait's set_current_state()
	 */
	smp_mb();
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);

	mmput(mm);
	kvm_put_kvm(vcpu->kvm);
}
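
/*
 * Flush or cancel all outstanding work items for this vcpu and free
 * everything on the "done" list; typically used when the vcpu is being
 * destroyed or reset.
 */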
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
	/* cancel outstanding work queue item */
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.queue,
					 typeof(*work), queue);
		list_del(&work->queue);

#ifdef CONFIG_KVM_ASYNC_PF_SYNC
		flush_work(&work->work);
#else
		if (cancel_work_sync(&work->work)) {
			mmput(work->mm);
			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
			kmem_cache_free(async_pf_cache, work);
		}
#endif
	}

	spin_lock(&vcpu->async_pf.lock);
	while (!list_empty(&vcpu->async_pf.done)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.done,
					 typeof(*work), link);
		list_del(&work->link);
		kmem_cache_free(async_pf_cache, work);
	}
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued = 0;
}
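
/*
 * Deliver completed async page faults to the guest: for each item on the
 * "done" list, while the arch allows injection, notify arch code that the
 * page is ready, then unlink and free the work item.
 */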
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	      kvm_arch_can_inject_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					      link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		kvm_async_page_present_async(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		kmem_cache_free(async_pf_cache, work);
	}
}
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return 0;

	/* setup delayed work */

	/*
	 * do alloc nowait since if we are going to sleep anyway we
	 * may as well sleep faulting in page
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
	if (!work)
		return 0;

	work->wakeup_all = false;
	work->vcpu = vcpu;
	work->gva = gva;
	work->addr = hva;
	work->arch = *arch;
	work->mm = current->mm;
	atomic_inc(&work->mm->mm_users);
	kvm_get_kvm(work->vcpu->kvm);

	/* this can't really happen otherwise gfn_to_pfn_async
	   would succeed */
	if (unlikely(kvm_is_error_hva(work->addr)))
		goto retry_sync;

	INIT_WORK(&work->work, async_pf_execute);
	if (!schedule_work(&work->work))
		goto retry_sync;

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	kvm_arch_async_page_not_present(vcpu, work);
	return 1;
retry_sync:
	kvm_put_kvm(work->vcpu->kvm);
	mmput(work->mm);
	kmem_cache_free(async_pf_cache, work);
	return 0;
}
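
/*
 * Queue a "wakeup all" marker on the "done" list so that, once it is
 * processed, arch code wakes every guest task waiting on an async page
 * fault.  A no-op if completions are already pending.
 */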
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->wakeup_all = true;
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued++;
	return 0;
}