/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
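
/*
 * Illustrative userspace sketch (not part of this file): enabling SIE for
 * the current process goes through the /dev/kvm device ioctl handled above.
 * The variable name kvm_fd is hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		perror("KVM_S390_ENABLE_SIE");
 */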

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
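
/*
 * Illustrative userspace sketch (not part of this file): the values
 * returned above are what userspace sees when probing capabilities with
 * KVM_CHECK_EXTENSION on the /dev/kvm fd. kvm_fd is hypothetical.
 *
 *	int r = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_REGS);
 *	r == 1 means the capability is available, r == 0 means it is not.
 */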

/* Section: vm related */

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
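
/*
 * Illustrative userspace sketch (not part of this file): injecting a
 * floating (VM-wide) interrupt through the KVM_S390_INTERRUPT case above.
 * vm_fd and the chosen type/parameter values are assumptions for the
 * example.
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type   = KVM_S390_INT_VIRTIO,
 *		.parm   = 0,
 *		.parm64 = 0,
 *	};
 *	if (ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int) < 0)
 *		perror("KVM_S390_INTERRUPT");
 */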

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
	}

	kvm->arch.css_support = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
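
/*
 * Illustrative userspace sketch (not part of this file): reading one of
 * the registers handled above through the generic ONE_REG interface.
 * vcpu_fd is hypothetical; the register ids match the cases above.
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		perror("KVM_GET_ONE_REG");
 */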

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
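
/*
 * Illustrative userspace sketch (not part of this file): setting the
 * initial PSW of a stopped vcpu via KVM_S390_SET_INITIAL_PSW, which lands
 * in the handler above. vcpu_fd, the start address, and the mask value
 * (EA and BA bits for 64-bit addressing mode) are assumptions for the
 * example.
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000UL,
 *		.addr = 0x10000,
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw) < 0)
 *		perror("KVM_S390_SET_INITIAL_PSW");
 */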

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	vcpu->arch.sie_block->icptcode = 0;
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	trace_kvm_s390_sie_enter(vcpu,
				 atomic_read(&vcpu->arch.sie_block->cpuflags));

	/*
	 * As PF_VCPU will be used in the fault handler, there must be no
	 * uaccess between guest_enter and guest_exit.
	 */
	preempt_disable();
	kvm_guest_enter();
	preempt_enable();
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	kvm_guest_exit();

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (rc > 0)
		rc = 0;
	if (rc < 0) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
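
/*
 * Illustrative userspace sketch (not part of this file): the typical run
 * loop that drives kvm_arch_vcpu_ioctl_run() above. kvm_fd, vcpu_fd and
 * handle_sie_intercept() are hypothetical; the mmap'ed kvm_run page is the
 * shared structure whose psw and sync-regs fields are copied in and out
 * above.
 *
 *	int size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *			break;
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sie_intercept(run->s390_sieic.icptcode);
 *	}
 */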

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}
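
/*
 * Illustrative userspace sketch (not part of this file): a memslot that
 * passes the alignment checks above needs a 1MB-aligned userspace_addr
 * and a memory_size that is a multiple of 1MB. vm_fd, ram and ram_size
 * are hypothetical.
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = ram_size,
 *		.userspace_addr  = (__u64)ram,
 *	};
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem) < 0)
 *		perror("KVM_SET_USER_MEMORY_REGION");
 */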

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f47c0000UL;
	vfacilities[1] &= 0x001c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");