/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

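/*
 * Note (not from the original file): each entry above is exposed by the
 * generic kvm code as a per-vcpu counter file under /sys/kernel/debug/kvm/
 * once the module is loaded.
 */
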
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

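/*
 * Illustrative sketch (not from the original file): a userspace launcher
 * issues this device ioctl once on /dev/kvm before creating the VM. The
 * "kvm_fd" name is hypothetical.
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (kvm_fd < 0 || ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		...bail out, SIE is unavailable...
 */
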
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

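/*
 * Illustrative sketch (not from the original file): userspace injects a
 * floating interrupt through the VM ioctl handled above. The interrupt
 * type chosen and the "vm_fd"/"token" names are illustrative assumptions.
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_VIRTIO,
 *		.parm64 = token,
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
 */
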
int kvm_arch_init_vm(struct kvm *kvm)
{
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;
	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return 0;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

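/*
 * Note (not from the original file) on the two context switchers above:
 * while the vcpu is loaded, the guest copies of the floating point and
 * access registers live in the real registers and the host copies are
 * parked in vcpu->arch; vcpu_put restores the inverse arrangement.
 */
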
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix    = 0UL;
	vcpu->arch.sie_block->ihcpu     = 0xffff;
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

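/*
 * A hedged note (not from the original file): "pop" above is the s390
 * Principles of Operation. The non-zero values mirror its architected
 * initial-CPU-reset state: gcr[0] and gcr[14] take the control register
 * defaults (presumably the external-interrupt and machine-check
 * enablement bits required after reset), ihcpu = 0xffff presumably marks
 * the cached host cpu as invalid, and gbea = 1 is the reset value of the
 * breaking-event-address register.
 */
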
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

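/*
 * Note (not from the original file) on the SCA wiring above: each vcpu
 * publishes its SIE block address in sca->cpu[id].sda, scaoh/scaol hand
 * the hardware the high and low halves of the SCA origin, and mcn is a
 * bitmask of configured cpus indexed big-endian style as bit (63 - id).
 */
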
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

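/*
 * A hedged note (not from the original file): the two 16-byte memcpys
 * above shuttle guest registers 14 and 15 between vcpu->arch.guest_gprs
 * and the gg14/gg15 slots of the SIE control block, presumably because
 * the sie64a() entry path needs those two host registers for its own
 * linkage.
 */
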
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

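/*
 * Illustrative sketch (not from the original file): the userspace side of
 * the contract above maps the shared kvm_run area and loops on KVM_RUN,
 * re-entering on EINTR and dispatching on the exit reason this function
 * prepares. "vcpu_fd" and handle_sieic() are hypothetical.
 *
 *	long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *			break;
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run->s390_sieic.icptcode,
 *				     run->s390_sieic.ipa,
 *				     run->s390_sieic.ipb);
 *	}
 */
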
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

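/*
 * Note (not from the original file): absolute (or, in the prefixed case,
 * prefix-relative) byte 163 is the architected architectural-mode field
 * of store status; the archmode = 1 written above marks the status as
 * stored in z/Architecture format.
 */
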
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}

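/*
 * Illustrative sketch (not from the original file): userspace sets the
 * initial PSW through the ioctl above before restarting a stopped vcpu.
 * The mask value and "entry" are illustrative assumptions
 * (0x0000000180000000 sets the EA/BA bits for 64-bit addressing mode).
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000UL,
 *		.addr = entry,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */
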
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot, which
	   has to start at guest virtual zero, has to be located at a page
	   boundary in userland, and has to end at a page boundary. The
	   memory in userland may be fragmented into various different vmas.
	   It is okay to mmap() and munmap() in this slot at any time after
	   this call. */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

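/*
 * Illustrative sketch (not from the original file): the single slot these
 * checks admit, registered from userspace. "vm_fd", "backing" and
 * "ram_size" are hypothetical names; address and size must both be page
 * aligned.
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.userspace_addr = (unsigned long) backing,
 *		.memory_size = ram_size,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */
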
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}
}

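/*
 * Note (not from the original file): a vcpu currently running in SIE will
 * not notice the request bit until it leaves guest context; the injected
 * stop with ACTION_RELOADVCPU_ON_STOP forces that exit so the new memory
 * layout is picked up promptly.
 */
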
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);