/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
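/*
 * vfacilities holds the facility bits that are presented to the guest
 * (set up in kvm_s390_init() below); the gmap notifier is registered
 * so KVM learns when a guest prefix page gets unmapped.
 */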
unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;
/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}
void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
/* Section: vm related */

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
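/*
 * Set up the architecture specific parts of a new VM: the system
 * control area (SCA), the s390 debug feature area used for tracing,
 * and - unless this is a user controlled VM - the guest address
 * space (gmap).
 */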
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
	}

	kvm->arch.css_support = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
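/*
 * Host floating point and access registers are saved and the guest
 * copies are loaded when the vcpu is scheduled in; kvm_arch_vcpu_put()
 * reverses this when it is scheduled out.
 */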
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->ecb2  = 8;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
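/*
 * Allocate a vcpu and its SIE control block and, for non-ucontrol VMs,
 * hook the vcpu into the VM's system control area (SCA).
 */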
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
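/*
 * Called via the IPTE notifier when a guest page is unmapped: if the
 * page is one of a vcpu's two prefix pages, request an MMU reload for
 * that vcpu and kick it out of SIE.
 */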
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
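/*
 * The ONE_REG interface exposes selected SIE control block fields
 * (TOD programmable register, epoch difference, CPU timer and clock
 * comparator) to userspace.
 */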
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}
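/*
 * Perform one entry into SIE: deliver pending interrupts, handle
 * outstanding requests, run the guest via sie64a() and evaluate the
 * interception code on exit.
 */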
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	vcpu->arch.sie_block->icptcode = 0;
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	trace_kvm_s390_sie_enter(vcpu,
				 atomic_read(&vcpu->arch.sie_block->cpuflags));

	/*
	 * As PF_VCPU will be used in fault handler, between guest_enter
	 * and guest_exit should be no uaccess.
	 */
	preempt_disable();
	kvm_guest_enter();
	preempt_enable();
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	kvm_guest_exit();

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (rc > 0)
		rc = 0;
	if (rc < 0) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
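/*
 * Main run loop: synchronize register state from kvm_run, run SIE
 * until an exit requires userspace attention, then propagate the
 * resulting state back into kvm_run.
 */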
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
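/*
 * Copy a block to guest memory, either via the logical (prefixed)
 * mapping or to the absolute address, depending on "prefix".
 */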
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area
	 */
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
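/*
 * Dispatcher for the architecture specific vcpu ioctls: interrupt
 * injection, store status, initial PSW and reset, ONE_REG access and -
 * for user controlled VMs - address space mapping and fault handling.
 */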
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
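/*
 * For user controlled VMs the SIE control block can be mmap()ed into
 * userspace at KVM_S390_SIE_PAGE_OFFSET; everything else gets SIGBUS.
 */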
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
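/*
 * Module initialization: register with the common KVM code and set up
 * the facility list that is presented to guests.
 */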
static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f47c0000UL;
	vfacilities[1] &= 0x001c000000000000UL;
	return 0;
}
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");