/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
#include <asm/facility.h>

#define CREATE_TRACE_POINTS
#include "trace-s390.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;
/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
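/*
 * Note: s390_enable_sie() (invoked above for KVM_S390_ENABLE_SIE) is
 * assumed here to convert the calling process' page tables to the
 * extended format that the SIE instruction requires; it only needs to
 * happen once per mm before any VM can be created.
 */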
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
/* Section: vm related */

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
	}

	kvm->arch.css_support = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
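/*
 * The system control area (SCA) allocated in kvm_arch_init_vm() above is
 * shared by all VCPUs of the VM; each VCPU later enters its SIE block
 * address into the SCA slot matching its id (see kvm_arch_vcpu_create()
 * below).
 */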
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
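/*
 * While a VCPU is loaded, the guest's floating point and access registers
 * live in the real CPU registers: vcpu_load saves the host values and
 * installs the guest ones, vcpu_put does the reverse. Anything that needs
 * up-to-date guest values while the VCPU is loaded must save them first
 * (see kvm_s390_vcpu_store_status()).
 */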
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb   = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2  = 8;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
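/*
 * Note: the ecb bit 0x10 set in kvm_arch_vcpu_setup() enables interpreted
 * execution of the transactional-execution facility for the guest, which
 * is why it is only turned on when the host offers both facility 50
 * (constrained transactional execution) and facility 73 (transactional
 * execution).
 */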
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
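/*
 * exit_sie() kicks the CPU by raising the STOP interrupt request in the
 * SIE control block and then spins until prog0c shows that the CPU has
 * left SIE. exit_sie_sync() additionally sets PROG_BLOCK_SIE first, so
 * the VCPU cannot re-enter SIE until s390_vcpu_unblock() is called.
 */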
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
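/*
 * Masking off 0x1000 above makes the comparison match either of the two
 * consecutive 4K pages that form the guest prefix area, so an unmap of
 * either page re-arms the request.
 */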
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
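	/*
	 * gg14/gg15 are the copies of guest registers 14 and 15 kept in the
	 * SIE control block; the 16-byte copy above refreshes both from the
	 * sync-regs area before (re)entering SIE.
	 */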
	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;
	} else {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
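/*
 * Note on exit_reason above: a non-negative value from sie64a() means SIE
 * stopped with an intercept that can be handled; a negative value means
 * the host faulted while the guest was running, which is either reported
 * to userspace (ucontrol guests) or reflected to the guest as an
 * addressing exception.
 */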
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask      = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr      = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}
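/*
 * The prefix flag selects the guest addressing mode used for the save
 * area: copy_to_guest() applies low-core prefixing, while
 * copy_to_guest_absolute() writes to guest absolute addresses.
 */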
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;
	u64 clkcomp;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;
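	/*
	 * The clock comparator is stored shifted right by 8, which is
	 * assumed here to match the save-area format (only bits 0-55 of
	 * the register are stored).
	 */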
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&clkcomp, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;

	return 0;
}
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);
/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");