/* arch/s390/kvm/kvm-s390.c */
/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

static unsigned long long *facilities;
/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}
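/*
 * Illustrative sketch, not part of this file: userspace reaches the
 * device ioctl above through the /dev/kvm node. Under that assumption
 * a launcher could do:
 *
 *        int kvm_fd = open("/dev/kvm", O_RDWR);
 *        if (kvm_fd >= 0)
 *                ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
 *
 * Exact call site and error handling depend on the userspace launcher.
 */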
int kvm_dev_ioctl_check_extension(long ext)
{
        switch (ext) {
        default:
                return 0;
        }
}
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -EINVAL;
        }

        return r;
}
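/*
 * Illustrative sketch, not from this tree: injecting a floating
 * interrupt from userspace on the VM file descriptor (vm_fd is
 * hypothetical) would pass the same struct handled above, e.g.:
 *
 *        struct kvm_s390_interrupt s390int = {
 *                .type = KVM_S390_INT_VIRTIO,
 *                .parm = 0,
 *                .parm64 = 0,
 *        };
 *        ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
 */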
struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_nokvm;

        rc = -ENOMEM;
        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                goto out_nokvm;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_nosca;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        return kvm;
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_nosca:
        kfree(kvm);
out_nokvm:
        return ERR_PTR(rc);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_destroy(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        kfree(kvm);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix = 0UL;
        vcpu->arch.sie_block->ihcpu = 0xffff;
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}
/* The current code can have up to 256 pages for virtio */
#define VIRTIODESCSPACE (256ul * 4096ul)
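/* That is 256 * 4096 bytes = 1 MiB reserved directly above the guest
 * storage for virtio descriptors; see the gmslm setup below. */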
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
        vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
                                      vcpu->kvm->arch.guest_origin +
                                      VIRTIODESCSPACE - 1ul;
        vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
        vcpu->arch.sie_block->ecb = 2;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        vcpu->arch.sie_block->fac = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        int rc = -ENOMEM;

        if (!vcpu)
                goto out_nomem;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        else
                BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_cpu;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_cpu:
        kfree(vcpu);
out_nomem:
        return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_s390_vcpu_initial_reset(vcpu);
        vcpu_put(vcpu);
        return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        vcpu_put(vcpu);
        return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        vcpu_load(vcpu);
        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else
                vcpu->arch.sie_block->gpsw = psw;
        vcpu_put(vcpu);
        return rc;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        /* verify that memory has been registered */
        if (!vcpu->kvm->arch.guest_memsize) {
                vcpu_put(vcpu);
                return -EINVAL;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
                vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
                vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
                break;
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        might_fault();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (signal_pending(current) && !rc)
                rc = -EINTR;

        if (rc == -ENOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
                kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);

        vcpu->stat.exit_userspace++;
        return rc;
}
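/*
 * Illustrative sketch, not from this tree: userspace typically drives
 * the run handler above in a loop on the vcpu file descriptor
 * (vcpu_fd and the handler are hypothetical):
 *
 *        struct kvm_run *run = mmap(NULL, mmap_size,
 *                                   PROT_READ | PROT_WRITE,
 *                                   MAP_SHARED, vcpu_fd, 0);
 *        for (;;) {
 *                ioctl(vcpu_fd, KVM_RUN, 0);
 *                if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *                        handle_sieic(run);        // hypothetical helper
 *        }
 *
 * mmap_size would come from the KVM_GET_VCPU_MMAP_SIZE ioctl.
 */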
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}
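/*
 * The copy lengths above follow from the register file sizes: 16
 * floating point and 16 general purpose registers of 8 bytes each
 * (128 bytes), a 16 byte PSW, 16 access registers of 4 bytes
 * (64 bytes) and 16 control registers of 8 bytes (128 bytes).
 */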
static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        int rc;

        vcpu_load(vcpu);
        rc = __kvm_s390_vcpu_store_status(vcpu, addr);
        vcpu_put(vcpu);
        return rc;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        return -EFAULT;
                return kvm_s390_inject_vcpu(vcpu, &s390int);
        }
        case KVM_S390_STORE_STATUS:
                return kvm_s390_vcpu_store_status(vcpu, arg);
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                if (copy_from_user(&psw, argp, sizeof(psw)))
                        return -EFAULT;
                return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
        }
        case KVM_S390_INITIAL_RESET:
                return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
        default:
                break;
        }
        return -EINVAL;
}
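/*
 * Illustrative sketch, not from this tree: setting the initial PSW
 * from userspace before the first run (mask and address values are
 * hypothetical):
 *
 *        psw_t psw = {
 *                .mask = 0x0000000180000000UL,
 *                .addr = 0x10000,
 *        };
 *        ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */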
/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
        int i;

        /* A few sanity checks. We can have exactly one memory slot, which
           has to start at guest physical zero, be page aligned in userland
           and end at a page boundary. The memory in userland may be
           fragmented into multiple vmas. It is okay to mmap() and munmap()
           in this slot at any time after making this call. */

        if (mem->slot || kvm->arch.guest_memsize)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & (PAGE_SIZE - 1))
                return -EINVAL;

        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        /* lock all vcpus */
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (!kvm->vcpus[i])
                        continue;
                if (!mutex_trylock(&kvm->vcpus[i]->mutex))
                        goto fail_out;
        }

        kvm->arch.guest_origin = mem->userspace_addr;
        kvm->arch.guest_memsize = mem->memory_size;

        /* update sie control blocks, and unlock all vcpus */
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm->vcpus[i]->arch.sie_block->gmsor =
                                kvm->arch.guest_origin;
                        kvm->vcpus[i]->arch.sie_block->gmslm =
                                kvm->arch.guest_memsize +
                                kvm->arch.guest_origin +
                                VIRTIODESCSPACE - 1ul;
                        mutex_unlock(&kvm->vcpus[i]->mutex);
                }
        }

        return 0;

fail_out:
        /* vcpu i failed to lock; release only the vcpus locked before it */
        while (--i >= 0)
                if (kvm->vcpus[i])
                        mutex_unlock(&kvm->vcpus[i]->mutex);
        return -EINVAL;
}
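/*
 * Illustrative sketch, not from this tree: given the checks above,
 * the only legal slot is slot 0 at guest physical zero, page aligned
 * and user allocated (ram and ram_size are hypothetical):
 *
 *        struct kvm_userspace_memory_region mem = {
 *                .slot = 0,
 *                .guest_phys_addr = 0,
 *                .memory_size = ram_size,
 *                .userspace_addr = (__u64) ram,
 *        };
 *        ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */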
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}
static int __init kvm_s390_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        stfle(facilities, 1);
        facilities[0] &= 0xff00fff3f0700000ULL;
        return 0;
}
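/*
 * Note on sizing: 256 doublewords * 8 bytes = 2048 bytes, so the
 * zeroed page allocated above easily holds the largest possible
 * stfle result; the mask then keeps only facility bits known to
 * work under KVM.
 */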
static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);