/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
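/*
 * Per-vcpu statistics exported through debugfs; VCPU_STAT() records the
 * offset of each counter within struct kvm_vcpu so the generic KVM code
 * can pick the value up for every vcpu.
 */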
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
static unsigned long long *facilities;
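/*
 * Host facility list as presented to guests: filled in kvm_s390_init()
 * from the STFLE data in the lowcore and masked down to the facilities
 * KVM is known to handle; kvm_arch_vcpu_setup() points each vcpu's SIE
 * block at this page.
 */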
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
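/*
 * Capabilities reported to userspace via KVM_CHECK_EXTENSION; anything
 * not listed below is reported as unsupported (0).
 */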
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_S390_COW:
		r = sclp_get_fac85() & 0x2;
		break;
	default:
		r = 0;
	}
	return r;
}
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
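/*
 * A VM is set up with a system control area (SCA) page shared by all of
 * its vcpus, a debug feature area for the s390dbf traces, the list of
 * floating interrupts, and - unless it is a user controlled VM - a gmap
 * describing the guest address space within the host's mm.
 */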
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
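/*
 * Register switching on sched-in/sched-out: vcpu_load saves the host's
 * floating point and access registers and installs the guest copies;
 * kvm_arch_vcpu_put below does the reverse and detaches the gmap.
 */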
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}
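/*
 * One-time setup of the SIE control block: the cpuflags/ecb/eca fields
 * program the SIE execution controls, fac points SIE at the masked
 * facility list allocated in kvm_s390_init(), and the hrtimer/tasklet
 * pair implements the guest's clock comparator wakeup.
 */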
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
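/*
 * For non-ucontrol VMs the new vcpu is registered in the SCA: its SIE
 * block address is entered in the per-cpu SDA slot and the matching bit
 * in the mcn mask is set, mirroring what kvm_arch_vcpu_destroy undoes.
 */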
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	return 0;
}
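/*
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG expose a few SIE block fields (TOD
 * programmable register, epoch difference, cpu timer, clock comparator)
 * directly; reg->addr is a userspace pointer, hence put_user/get_user.
 */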
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
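/*
 * A single pass through SIE: guest r14/r15 are staged in the SIE block,
 * pending host machine checks and guest interrupts are handled, then
 * sie64a() runs the guest until the next interception. A fault while in
 * SIE becomes SIE_INTERCEPT_UCONTROL for user controlled VMs and is
 * injected into the guest as an addressing exception otherwise.
 */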
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;

	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
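/*
 * KVM_RUN: the PSW and any registers marked dirty in kvm_run are copied
 * into the SIE block, then __vcpu_run()/kvm_handle_sie_intercept() loop
 * until an exit needs userspace attention or a signal is pending, and
 * finally the guest state is mirrored back into kvm_run.
 */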
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
*vcpu
, u64 guestdest
, void *from
,
724 unsigned long n
, int prefix
)
727 return copy_to_guest(vcpu
, guestdest
, from
, n
);
729 return copy_to_guest_absolute(vcpu
, guestdest
, from
, n
);
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
*filp
,
801 unsigned int ioctl
, unsigned long arg
)
803 struct kvm_vcpu
*vcpu
= filp
->private_data
;
804 void __user
*argp
= (void __user
*)arg
;
808 case KVM_S390_INTERRUPT
: {
809 struct kvm_s390_interrupt s390int
;
812 if (copy_from_user(&s390int
, argp
, sizeof(s390int
)))
814 r
= kvm_s390_inject_vcpu(vcpu
, &s390int
);
817 case KVM_S390_STORE_STATUS
:
818 r
= kvm_s390_vcpu_store_status(vcpu
, arg
);
820 case KVM_S390_SET_INITIAL_PSW
: {
824 if (copy_from_user(&psw
, argp
, sizeof(psw
)))
826 r
= kvm_arch_vcpu_ioctl_set_initial_psw(vcpu
, psw
);
829 case KVM_S390_INITIAL_RESET
:
830 r
= kvm_arch_vcpu_ioctl_initial_reset(vcpu
);
832 case KVM_SET_ONE_REG
:
833 case KVM_GET_ONE_REG
: {
834 struct kvm_one_reg reg
;
836 if (copy_from_user(®
, argp
, sizeof(reg
)))
838 if (ioctl
== KVM_SET_ONE_REG
)
839 r
= kvm_arch_vcpu_ioctl_set_one_reg(vcpu
, ®
);
841 r
= kvm_arch_vcpu_ioctl_get_one_reg(vcpu
, ®
);
844 #ifdef CONFIG_KVM_S390_UCONTROL
845 case KVM_S390_UCAS_MAP
: {
846 struct kvm_s390_ucas_mapping ucasmap
;
848 if (copy_from_user(&ucasmap
, argp
, sizeof(ucasmap
))) {
853 if (!kvm_is_ucontrol(vcpu
->kvm
)) {
858 r
= gmap_map_segment(vcpu
->arch
.gmap
, ucasmap
.user_addr
,
859 ucasmap
.vcpu_addr
, ucasmap
.length
);
862 case KVM_S390_UCAS_UNMAP
: {
863 struct kvm_s390_ucas_mapping ucasmap
;
865 if (copy_from_user(&ucasmap
, argp
, sizeof(ucasmap
))) {
870 if (!kvm_is_ucontrol(vcpu
->kvm
)) {
875 r
= gmap_unmap_segment(vcpu
->arch
.gmap
, ucasmap
.vcpu_addr
,
880 case KVM_S390_VCPU_FAULT
: {
881 r
= gmap_fault(arg
, vcpu
->arch
.gmap
);
882 if (!IS_ERR_VALUE(r
))
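/*
 * For user controlled VMs the SIE control block can be mmap()ed by
 * userspace at KVM_S390_SIE_PAGE_OFFSET; everything else gets SIGBUS.
 */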
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}
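/*
 * Once the generic code has accepted the slot, map the whole userspace
 * range into the guest address space in one go via the gmap.
 */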
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);