/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
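
/*
 * VCPU_STAT yields the (offset, kind) pair that the generic KVM debugfs
 * code consumes for each entry of the table below.
 */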

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
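
/*
 * Illustrative userspace sketch (not part of this file, error handling
 * omitted): enabling SIE for the calling process would look roughly like
 *
 *	int fd = open("/dev/kvm", O_RDWR);
 *	ioctl(fd, KVM_S390_ENABLE_SIE, 0);
 */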

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm)
{
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;	/* all failure paths below are allocation failures */

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");
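
	/*
	 * The gmap is the guest address space that the SIE instruction runs
	 * against; it is allocated once per VM here and switched in per vcpu
	 * via gmap_enable() in kvm_arch_vcpu_load() below.
	 */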
	kvm->arch.gmap = gmap_alloc(current->mm);
	if (!kvm->arch.gmap)
		goto out_nogmap;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
	    (__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
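	/* swap host register state for guest state; undone in kvm_arch_vcpu_put() */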
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
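	/*
	 * The CR0/CR14 values above are presumably the architected initial
	 * CPU reset values from the Principles of Operation (default
	 * interruption subclass masks).
	 */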
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
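
	/*
	 * The mcn bitmap is numbered MSB first (s390 convention) while
	 * set_bit() counts from the LSB, hence the "63 - id" above and the
	 * matching clear_bit() in kvm_arch_vcpu_destroy().
	 */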
	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
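	/*
	 * gprs 14 and 15 live in the SIE block (gg14/gg15) while the guest
	 * runs; the two 16-byte memcpys below and at the end of this
	 * function sync them in and back out.
	 */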
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
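	/*
	 * Presumably kvm_guest_enter()/kvm_guest_exit() must not race with
	 * an interrupt here, hence the local_irq_disable()/enable() pairs
	 * around both calls.
	 */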
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);
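
	/*
	 * The intercept handler may request a complete re-entry of the vcpu
	 * (e.g. after a reset); in that case redo the signal and run state
	 * setup from the top.
	 */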
	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks: we support exactly one memory slot, it must
	   start at guest physical address zero, and both its userland address
	   and its size must be aligned to a 1 MB segment boundary (the
	   0xfffff masks below). The userland memory may be fragmented into
	   several vmas, and it is fine to mmap() and munmap() in this slot
	   at any time after this call. */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
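
	/*
	 * facilities[0] and facilities[1] hold STFLE facility bits 0-63 and
	 * 64-127; the two masks above whitelist the bits reported to guests.
	 */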
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);