updated on Thu Jan 19 16:10:29 UTC 2012
[aur-mirror.git] / kvm-opensuse / kvm-preXX-init-on-demand.patch
blob a3d0fc13ea92f83bb284a51cd058ddf82828207f
1 From 3be490091c2503d8b50f4a0ce1efe68004f65d3e Mon Sep 17 00:00:00 2001
2 From: Alexander Graf <agraf@suse.de>
3 Date: Tue, 4 Nov 2008 18:48:21 +0100
4 Subject: [PATCH] Activate Virtualization On Demand v3
6 X86 CPUs need to have some magic happening to enable the virtualization
7 extensions on them. This magic can result in unpleasant results for
8 users, like blocking other VMMs from working (vmx) or using invalid TLB
9 entries (svm).
11 Currently KVM activates virtualization when the respective kernel module
12 is loaded. This blocks us from autoloading KVM modules without breaking
13 other VMMs.
15 To circumvent this problem at least a bit, this patch introduces on
16 demand activation of virtualization. This means that, instead,
17 virtualization is enabled on creation of the first virtual machine
18 and disabled on removal of the last one.
20 So using this, KVM can be easily autoloaded, while keeping other
21 hypervisors usable.
23 v2 adds returns to non-x86 hardware_enables and adds IA64 change
24 v3 changes:
25 - use spin_lock instead of atomics
26 - put locking to new functions hardware_{en,dis}able_all that get called
27 on VM creation/destruction
28 - remove usage counter checks where not necessary
29 - return -EINVAL for IA64 slot < 0 case
31 Signed-off-by: Alexander Graf <agraf@suse.de>
32 ---
33 arch/ia64/kvm/kvm-ia64.c | 8 +++--
34 arch/powerpc/kvm/powerpc.c | 3 +-
35 arch/s390/kvm/kvm-s390.c | 3 +-
36 kernel/x86/svm.c | 13 +++++--
37 kernel/x86/vmx.c | 7 +++-
38 kernel/x86/x86.c | 4 +-
39 kernel/include/asm-x86/kvm_host.h | 2 +-
40 kernel/include/linux/kvm_host.h | 2 +-
41 virt/kvm/kvm_main.c | 75 ++++++++++++++++++++++++++++++++++++-------
42 9 files changed, 90 insertions(+), 27 deletions(-)
44 Index: kvm-83/kernel/ia64/kvm-ia64.c
45 ===================================================================
46 --- kvm-83.orig/kernel/ia64/kvm-ia64.c
47 +++ kvm-83/kernel/ia64/kvm-ia64.c
48 @@ -151,7 +151,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *h
50 static DEFINE_SPINLOCK(vp_lock);
52 -void kvm_arch_hardware_enable(void *garbage)
53 +int kvm_arch_hardware_enable(void *garbage)
55 long status;
56 long tmp_base;
57 @@ -165,7 +165,7 @@ void kvm_arch_hardware_enable(void *garb
58 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
59 local_irq_restore(saved_psr);
60 if (slot < 0)
61 - return;
62 + return -EINVAL;
64 spin_lock(&vp_lock);
65 status = ia64_pal_vp_init_env(kvm_vsa_base ?
66 @@ -173,7 +173,7 @@ void kvm_arch_hardware_enable(void *garb
67 __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
68 if (status != 0) {
69 printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
70 - return ;
71 + return -EINVAL;
74 if (!kvm_vsa_base) {
75 @@ -182,6 +182,8 @@ void kvm_arch_hardware_enable(void *garb
77 spin_unlock(&vp_lock);
78 ia64_ptr_entry(0x3, slot);
80 + return 0;
83 void kvm_arch_hardware_disable(void *garbage)
84 Index: kvm-83/kernel/x86/svm.c
85 ===================================================================
86 --- kvm-83.orig/kernel/x86/svm.c
87 +++ kvm-83/kernel/x86/svm.c
88 @@ -303,7 +303,7 @@ static void svm_hardware_disable(void *g
89 cpu_svm_disable();
92 -static void svm_hardware_enable(void *garbage)
93 +static int svm_hardware_enable(void *garbage)
96 struct svm_cpu_data *svm_data;
97 @@ -312,16 +312,20 @@ static void svm_hardware_enable(void *ga
98 struct kvm_desc_struct *gdt;
99 int me = raw_smp_processor_id();
101 + rdmsrl(MSR_EFER, efer);
102 + if (efer & EFER_SVME)
103 + return -EBUSY;
105 if (!has_svm()) {
106 printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
107 - return;
108 + return -EINVAL;
110 svm_data = per_cpu(svm_data, me);
112 if (!svm_data) {
113 printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
114 me);
115 - return;
116 + return -EINVAL;
119 svm_data->asid_generation = 1;
120 @@ -332,11 +336,12 @@ static void svm_hardware_enable(void *ga
121 gdt = (struct kvm_desc_struct *)gdt_descr.address;
122 svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
124 - rdmsrl(MSR_EFER, efer);
125 wrmsrl(MSR_EFER, efer | EFER_SVME);
127 wrmsrl(MSR_VM_HSAVE_PA,
128 page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
130 + return 0;
133 static void svm_cpu_uninit(int cpu)
134 Index: kvm-83/kernel/x86/vmx.c
135 ===================================================================
136 --- kvm-83.orig/kernel/x86/vmx.c
137 +++ kvm-83/kernel/x86/vmx.c
138 @@ -1099,12 +1099,15 @@ static __init int vmx_disabled_by_bios(v
139 /* locked but not enabled */
142 -static void hardware_enable(void *garbage)
143 +static int hardware_enable(void *garbage)
145 int cpu = raw_smp_processor_id();
146 u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
147 u64 old;
149 + if (read_cr4() & X86_CR4_VMXE)
150 + return -EBUSY;
152 INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
153 rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
154 if ((old & (FEATURE_CONTROL_LOCKED |
155 @@ -1119,6 +1122,8 @@ static void hardware_enable(void *garbag
156 asm volatile (ASM_VMX_VMXON_RAX
157 : : "a"(&phys_addr), "m"(phys_addr)
158 : "memory", "cc");
160 + return 0;
163 static void vmclear_local_vcpus(void)
164 Index: kvm-83/kernel/x86/x86.c
165 ===================================================================
166 --- kvm-83.orig/kernel/x86/x86.c
167 +++ kvm-83/kernel/x86/x86.c
168 @@ -4158,9 +4158,9 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu
169 return kvm_x86_ops->vcpu_reset(vcpu);
172 -void kvm_arch_hardware_enable(void *garbage)
173 +int kvm_arch_hardware_enable(void *garbage)
175 - kvm_x86_ops->hardware_enable(garbage);
176 + return kvm_x86_ops->hardware_enable(garbage);
179 void kvm_arch_hardware_disable(void *garbage)
180 Index: kvm-83/kernel/include/asm-x86/kvm_host.h
181 ===================================================================
182 --- kvm-83.orig/kernel/include/asm-x86/kvm_host.h
183 +++ kvm-83/kernel/include/asm-x86/kvm_host.h
184 @@ -504,7 +504,7 @@ struct descriptor_table {
185 struct kvm_x86_ops {
186 int (*cpu_has_kvm_support)(void); /* __init */
187 int (*disabled_by_bios)(void); /* __init */
188 - void (*hardware_enable)(void *dummy); /* __init */
189 + int (*hardware_enable)(void *dummy); /* __init */
190 void (*hardware_disable)(void *dummy);
191 void (*check_processor_compatibility)(void *rtn);
192 int (*hardware_setup)(void); /* __init */
193 Index: kvm-83/kernel/include/linux/kvm_host.h
194 ===================================================================
195 --- kvm-83.orig/kernel/include/linux/kvm_host.h
196 +++ kvm-83/kernel/include/linux/kvm_host.h
197 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu
198 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
200 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
201 -void kvm_arch_hardware_enable(void *garbage);
202 +int kvm_arch_hardware_enable(void *garbage);
203 void kvm_arch_hardware_disable(void *garbage);
204 int kvm_arch_hardware_setup(void);
205 void kvm_arch_hardware_unsetup(void);
206 Index: kvm-83/kernel/x86/kvm_main.c
207 ===================================================================
208 --- kvm-83.orig/kernel/x86/kvm_main.c
209 +++ kvm-83/kernel/x86/kvm_main.c
210 @@ -112,6 +112,8 @@ DEFINE_SPINLOCK(kvm_lock);
211 LIST_HEAD(vm_list);
213 static cpumask_var_t cpus_hardware_enabled;
214 +static int kvm_usage_count = 0;
215 +static DEFINE_SPINLOCK(kvm_usage_lock);
217 struct kmem_cache *kvm_vcpu_cache;
218 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
219 @@ -122,6 +124,8 @@ struct dentry *kvm_debugfs_dir;
221 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
222 unsigned long arg);
223 +static int hardware_enable_all(void);
224 +static void hardware_disable_all(void);
226 static bool kvm_rebooting;
228 @@ -873,13 +877,19 @@ static const struct mmu_notifier_ops kvm
230 static struct kvm *kvm_create_vm(void)
232 + int r = 0;
233 struct kvm *kvm = kvm_arch_create_vm();
234 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
235 struct page *page;
236 #endif
238 if (IS_ERR(kvm))
239 - goto out;
240 + return kvm;
242 + r = hardware_enable_all();
243 + if (r) {
244 + goto out_err;
246 #ifdef CONFIG_HAVE_KVM_IRQCHIP
247 INIT_HLIST_HEAD(&kvm->mask_notifier_list);
248 #endif
249 @@ -887,8 +897,8 @@ static struct kvm *kvm_create_vm(void)
250 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
251 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
252 if (!page) {
253 - kfree(kvm);
254 - return ERR_PTR(-ENOMEM);
255 + r = -ENOMEM;
256 + goto out_err;
258 kvm->coalesced_mmio_ring =
259 (struct kvm_coalesced_mmio_ring *)page_address(page);
260 @@ -896,15 +906,13 @@ static struct kvm *kvm_create_vm(void)
262 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
264 - int err;
265 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
266 - err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
267 - if (err) {
268 + r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
269 + if (r) {
270 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
271 put_page(page);
272 #endif
273 - kfree(kvm);
274 - return ERR_PTR(err);
275 + goto out_err;
278 #endif
279 @@ -923,8 +931,12 @@ mmget(&kvm->mm->mm_count);
280 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
281 kvm_coalesced_mmio_init(kvm);
282 #endif
283 -out:
284 return kvm;
286 +out_err:
287 + hardware_disable_all();
288 + kfree(kvm);
289 + return ERR_PTR(r);
293 @@ -974,6 +986,7 @@ static void kvm_destroy_vm(struct kvm *k
294 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
295 #endif
296 kvm_arch_destroy_vm(kvm);
297 + hardware_disable_all();
298 mmdrop(mm);
301 @@ -2112,14 +2125,40 @@ static struct miscdevice kvm_dev = {
302 &kvm_chardev_ops,
305 -static void hardware_enable(void *junk)
306 +static void hardware_enable(void *_r)
308 int cpu = raw_smp_processor_id();
309 + int r;
311 + /* If enabling a previous CPU failed already, let's not continue */
312 + if (_r && *((int*)_r))
313 + return;
315 if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
316 return;
317 + r = kvm_arch_hardware_enable(NULL);
318 + if (_r)
319 + *((int*)_r) = r;
320 + if (r) {
321 + printk(KERN_INFO "kvm: enabling virtualization on "
322 + "CPU%d failed\n", cpu);
323 + return;
326 cpumask_set_cpu(cpu, cpus_hardware_enabled);
327 - kvm_arch_hardware_enable(NULL);
330 +static int hardware_enable_all(void)
332 + int r = 0;
334 + spin_lock(&kvm_usage_lock);
335 + kvm_usage_count++;
336 + if (kvm_usage_count == 1)
337 + kvm_on_each_cpu(hardware_enable, &r, 1);
338 + spin_unlock(&kvm_usage_lock);
340 + return r;
343 static void hardware_disable(void *junk)
344 @@ -2132,6 +2171,18 @@ static void hardware_disable(void *junk)
345 kvm_arch_hardware_disable(NULL);
348 +static void hardware_disable_all(void)
350 + if (!kvm_usage_count)
351 + return;
353 + spin_lock(&kvm_usage_lock);
354 + kvm_usage_count--;
355 + if (!kvm_usage_count)
356 + kvm_on_each_cpu(hardware_disable, NULL, 1);
357 + spin_unlock(&kvm_usage_lock);
360 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
361 void *v)
363 @@ -2382,7 +2433,6 @@ int kvm_init(void *opaque, unsigned int
364 goto out_free_1;
367 - kvm_on_each_cpu(hardware_enable, NULL, 1);
368 r = register_cpu_notifier(&kvm_cpu_notifier);
369 if (r)
370 goto out_free_2;
371 @@ -2440,7 +2490,6 @@ out_free_3:
372 unregister_reboot_notifier(&kvm_reboot_notifier);
373 unregister_cpu_notifier(&kvm_cpu_notifier);
374 out_free_2:
375 - kvm_on_each_cpu(hardware_disable, NULL, 1);
376 out_free_1:
377 kvm_arch_hardware_unsetup();
378 out_free_0a:
379 Index: kvm-83/kernel/ia64/kvm_main.c
380 ===================================================================
381 --- kvm-83.orig/kernel/ia64/kvm_main.c
382 +++ kvm-83/kernel/ia64/kvm_main.c
383 @@ -112,6 +112,8 @@ DEFINE_SPINLOCK(kvm_lock);
384 LIST_HEAD(vm_list);
386 static cpumask_var_t cpus_hardware_enabled;
387 +static int kvm_usage_count = 0;
388 +static DEFINE_SPINLOCK(kvm_usage_lock);
390 struct kmem_cache *kvm_vcpu_cache;
391 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
392 @@ -122,6 +124,8 @@ struct dentry *kvm_debugfs_dir;
394 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
395 unsigned long arg);
396 +static int hardware_enable_all(void);
397 +static void hardware_disable_all(void);
399 static bool kvm_rebooting;
401 @@ -873,13 +877,19 @@ static const struct mmu_notifier_ops kvm
403 static struct kvm *kvm_create_vm(void)
405 + int r = 0;
406 struct kvm *kvm = kvm_arch_create_vm();
407 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
408 struct page *page;
409 #endif
411 if (IS_ERR(kvm))
412 - goto out;
413 + return kvm;
415 + r = hardware_enable_all();
416 + if (r) {
417 + goto out_err;
419 #ifdef CONFIG_HAVE_KVM_IRQCHIP
420 INIT_HLIST_HEAD(&kvm->mask_notifier_list);
421 #endif
422 @@ -887,8 +897,8 @@ static struct kvm *kvm_create_vm(void)
423 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
424 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
425 if (!page) {
426 - kfree(kvm);
427 - return ERR_PTR(-ENOMEM);
428 + r = -ENOMEM;
429 + goto out_err;
431 kvm->coalesced_mmio_ring =
432 (struct kvm_coalesced_mmio_ring *)page_address(page);
433 @@ -896,15 +906,13 @@ static struct kvm *kvm_create_vm(void)
435 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
437 - int err;
438 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
439 - err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
440 - if (err) {
441 + r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
442 + if (r) {
443 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
444 put_page(page);
445 #endif
446 - kfree(kvm);
447 - return ERR_PTR(err);
448 + goto out_err;
451 #endif
452 @@ -923,8 +931,12 @@ static struct kvm *kvm_create_vm(void)
453 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
454 kvm_coalesced_mmio_init(kvm);
455 #endif
456 -out:
457 return kvm;
459 +out_err:
460 + hardware_disable_all();
461 + kfree(kvm);
462 + return ERR_PTR(r);
466 @@ -974,6 +986,7 @@ static void kvm_destroy_vm(struct kvm *k
467 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
468 #endif
469 kvm_arch_destroy_vm(kvm);
470 + hardware_disable_all();
471 mmdrop(mm);
474 @@ -2112,14 +2125,40 @@ static struct miscdevice kvm_dev = {
475 &kvm_chardev_ops,
478 -static void hardware_enable(void *junk)
479 +static void hardware_enable(void *_r)
481 int cpu = raw_smp_processor_id();
482 + int r;
484 + /* If enabling a previous CPU failed already, let's not continue */
485 + if (_r && *((int*)_r))
486 + return;
488 if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
489 return;
490 + r = kvm_arch_hardware_enable(NULL);
491 + if (_r)
492 + *((int*)_r) = r;
493 + if (r) {
494 + printk(KERN_INFO "kvm: enabling virtualization on "
495 + "CPU%d failed\n", cpu);
496 + return;
499 cpumask_set_cpu(cpu, cpus_hardware_enabled);
500 - kvm_arch_hardware_enable(NULL);
503 +static int hardware_enable_all(void)
505 + int r = 0;
507 + spin_lock(&kvm_usage_lock);
508 + kvm_usage_count++;
509 + if (kvm_usage_count == 1)
510 + on_each_cpu(hardware_enable, &r, 1);
511 + spin_unlock(&kvm_usage_lock);
513 + return r;
516 static void hardware_disable(void *junk)
517 @@ -2132,6 +2171,18 @@ static void hardware_disable(void *junk)
518 kvm_arch_hardware_disable(NULL);
521 +static void hardware_disable_all(void)
523 + if (!kvm_usage_count)
524 + return;
526 + spin_lock(&kvm_usage_lock);
527 + kvm_usage_count--;
528 + if (!kvm_usage_count)
529 + on_each_cpu(hardware_disable, NULL, 1);
530 + spin_unlock(&kvm_usage_lock);
533 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
534 void *v)
536 @@ -2379,7 +2430,6 @@ int kvm_init(void *opaque, unsigned int
537 goto out_free_1;
540 - kvm_on_each_cpu(hardware_enable, NULL, 1);
541 r = register_cpu_notifier(&kvm_cpu_notifier);
542 if (r)
543 goto out_free_2;
544 @@ -2430,7 +2480,6 @@ out_free_3:
545 unregister_reboot_notifier(&kvm_reboot_notifier);
546 unregister_cpu_notifier(&kvm_cpu_notifier);
547 out_free_2:
548 - kvm_on_each_cpu(hardware_disable, NULL, 1);
549 out_free_1:
550 kvm_arch_hardware_unsetup();
551 out_free_0a: