[linux-2.6.32.60-moxart.git] arch/s390/kvm/kvm-s390.c (initial commit with v2.6.32.60)
/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
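
/*
 * Statistics exported through debugfs. Each entry below names a counter
 * in the per-vcpu statistics; VCPU_STAT() expands to the counter's offset
 * within struct kvm_vcpu plus the KVM_STAT_VCPU type tag, which the
 * common KVM debugfs code uses to sum the value across all vcpus.
 */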
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};
static unsigned long long *facilities;
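
/*
 * Facility list reported to guests: a zeroed DMA page that
 * kvm_s390_init() fills via stfle() and then masks down to the
 * facilities KVM is known to handle (see kvm_s390_init() below).
 */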
/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
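
/*
 * Illustrative userspace call (not part of this file; error handling
 * elided): KVM_S390_ENABLE_SIE is issued on the /dev/kvm fd itself,
 * before the process creates a VM:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
 */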
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}
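
/*
 * Userspace probes capabilities with KVM_CHECK_EXTENSION; on this kernel
 * only KVM_CAP_S390_PSW reports 1. Illustrative check (userspace):
 *
 *	int has_psw = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_PSW);
 */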
/* Section: vm related */

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -EINVAL;
	}

	return r;
}
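
/*
 * Illustrative userspace use (not part of this file): injecting a
 * floating (VM-wide) interrupt. KVM_S390_INT_VIRTIO is one plausible
 * type; parm/parm64 carry interrupt-specific parameters:
 *
 *	struct kvm_s390_interrupt irq = {
 *		.type = KVM_S390_INT_VIRTIO,
 *		.parm64 = cookie,
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
 */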
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}
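
/*
 * Note on the SCA: the system control area allocated above is a single
 * page shared by all vcpus of a VM. Each vcpu publishes the address of
 * its SIE control block in its sca->cpu[id].sda slot (see
 * kvm_arch_vcpu_create() below) and clears it again on destroy.
 */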
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in POP, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
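
/*
 * The values above reflect the architected initial-reset state: PSW,
 * prefix, CPU timer, clock comparator and TOD programmable register are
 * cleared, the control registers are zeroed except for the architected
 * bits kept in CR0/CR14, the guest FPC is cleared (and loaded via lfpc
 * so the host FPC matches), and the breaking-event-address register is
 * reset to its architected value of 1.
 */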
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
		get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
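
/*
 * The two 16-byte memcpy()s above shadow guest gprs 14 and 15 into the
 * gg14/gg15 fields of the SIE control block before entering SIE and
 * copy them back afterwards; sie64a() handles the remaining guest
 * registers through the guest_gprs array it is passed.
 */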
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

rerun_vcpu:
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed,
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}
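
/*
 * Illustrative userspace run loop (not part of this file); vcpu_fd and
 * run (the mmap()ed struct kvm_run) are assumed to exist, and
 * handle_sieic() is a placeholder:
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *			break;
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run->s390_sieic.icptcode,
 *				     run->s390_sieic.ipa,
 *				     run->s390_sieic.ipb);
 *	}
 */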
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
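
/*
 * The sizes written above match the architected save area: 16 8-byte
 * FPRs (128 bytes), 16 8-byte GPRs (128), a 16-byte PSW, the 4-byte
 * prefix, FPC and TOD programmable registers, the 8-byte CPU timer and
 * clock comparator, 16 4-byte access registers (64) and 16 8-byte
 * control registers (128).
 */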
static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}
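
/*
 * Note that KVM_S390_STORE_STATUS passes the guest address directly in
 * 'arg' rather than through a pointer. Illustrative userspace sequence
 * (not part of this file) for starting a vcpu, assuming a userspace
 * struct matching the 64-bit psw_t layout (mask, then address):
 *
 *	struct { __u64 mask; __u64 addr; } psw = { psw_mask, start_addr };
 *	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */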
/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* A few sanity checks. We can have exactly one memory slot, which
	   has to start at guest virtual zero, has to be located at a page
	   boundary in userland, and has to end at a page boundary. The
	   memory in userland may be fragmented into several different vmas,
	   and it is fine to mmap() and munmap() within this slot at any
	   time after this call. */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}

	return 0;
}
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}
static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, so we need a full
	 * page to hold the maximum amount of facilities. On the other hand,
	 * we only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	stfle(facilities, 1);
	facilities[0] &= 0xff00fff3f0700000ULL;
	return 0;
}
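
/*
 * stfle() above stores the first doubleword of the installed facility
 * list into the zeroed page; the mask then clears every facility bit
 * KVM does not want to advertise, so a guest querying the facility list
 * (the sie block's fac field points at this page) sees only the
 * whitelisted subset.
 */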
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);