* s390host.c -- hosting zSeries kernel virtual machines
*
* Copyright IBM Corp. 2008, 2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
*            Christian Borntraeger <borntraeger@de.ibm.com>
*            Heiko Carstens <heiko.carstens@de.ibm.com>
*            Christian Ehrhardt <ehrhardt@de.ibm.com>
16 #include <linux/compiler.h>
17 #include <linux/err.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/lowcore.h>
27 #include <asm/pgtable.h>
29 #include <asm/system.h>
/*
 * Expand to the (offset, type) pair expected by the debugfs_entries table
 * below: the byte offset of stat member 'x' inside struct kvm_vcpu, tagged
 * KVM_STAT_VCPU so the common KVM code sums it across all vcpus.
 */
33 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/*
 * Per-vcpu statistics exported through debugfs by the common KVM code.
 * Each entry maps a debugfs file name to a counter offset in
 * struct kvm_vcpu (see VCPU_STAT above).
 * NOTE(review): the closing "};" of this initializer appears to have been
 * lost in this extract — confirm against the original file.
 */
35 struct kvm_stats_debugfs_item debugfs_entries
[] = {
36 { "userspace_handled", VCPU_STAT(exit_userspace
) },
37 { "exit_null", VCPU_STAT(exit_null
) },
38 { "exit_validity", VCPU_STAT(exit_validity
) },
39 { "exit_stop_request", VCPU_STAT(exit_stop_request
) },
40 { "exit_external_request", VCPU_STAT(exit_external_request
) },
41 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt
) },
42 { "exit_instruction", VCPU_STAT(exit_instruction
) },
43 { "exit_program_interruption", VCPU_STAT(exit_program_interruption
) },
44 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program
) },
45 { "instruction_lctlg", VCPU_STAT(instruction_lctlg
) },
46 { "instruction_lctl", VCPU_STAT(instruction_lctl
) },
47 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal
) },
48 { "deliver_service_signal", VCPU_STAT(deliver_service_signal
) },
49 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt
) },
50 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal
) },
51 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal
) },
52 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal
) },
53 { "deliver_program_interruption", VCPU_STAT(deliver_program_int
) },
54 { "exit_wait_state", VCPU_STAT(exit_wait_state
) },
55 { "instruction_stidp", VCPU_STAT(instruction_stidp
) },
56 { "instruction_spx", VCPU_STAT(instruction_spx
) },
57 { "instruction_stpx", VCPU_STAT(instruction_stpx
) },
58 { "instruction_stap", VCPU_STAT(instruction_stap
) },
59 { "instruction_storage_key", VCPU_STAT(instruction_storage_key
) },
60 { "instruction_stsch", VCPU_STAT(instruction_stsch
) },
61 { "instruction_chsc", VCPU_STAT(instruction_chsc
) },
62 { "instruction_stsi", VCPU_STAT(instruction_stsi
) },
63 { "instruction_stfl", VCPU_STAT(instruction_stfl
) },
64 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense
) },
65 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency
) },
66 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop
) },
67 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch
) },
68 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix
) },
69 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart
) },
70 { "diagnose_44", VCPU_STAT(diagnose_44
) },
/*
 * Facility list page: allocated in kvm_s390_init() (see get_zeroed_page
 * near the bottom of this file), filled via stfle() and masked down to the
 * facility bits KVM supports; handed to each vcpu's SIE block as ->fac.
 * Freed in kvm_s390_exit().
 */
74 static unsigned long long *facilities
;
76 /* Section: not file related */
77 void kvm_arch_hardware_enable(void *garbage
)
79 /* every s390 is virtualization enabled ;-) */
82 void kvm_arch_hardware_disable(void *garbage
)
86 int kvm_arch_hardware_setup(void)
91 void kvm_arch_hardware_unsetup(void)
95 void kvm_arch_check_processor_compat(void *rtn
)
99 int kvm_arch_init(void *opaque
)
104 void kvm_arch_exit(void)
108 /* Section: device related */
109 long kvm_arch_dev_ioctl(struct file
*filp
,
110 unsigned int ioctl
, unsigned long arg
)
112 if (ioctl
== KVM_S390_ENABLE_SIE
)
113 return s390_enable_sie();
117 int kvm_dev_ioctl_check_extension(long ext
)
125 /* Section: vm related */
127 * Get (and clear) the dirty memory log for a memory slot.
129 int kvm_vm_ioctl_get_dirty_log(struct kvm
*kvm
,
130 struct kvm_dirty_log
*log
)
135 long kvm_arch_vm_ioctl(struct file
*filp
,
136 unsigned int ioctl
, unsigned long arg
)
138 struct kvm
*kvm
= filp
->private_data
;
139 void __user
*argp
= (void __user
*)arg
;
143 case KVM_S390_INTERRUPT
: {
144 struct kvm_s390_interrupt s390int
;
147 if (copy_from_user(&s390int
, argp
, sizeof(s390int
)))
149 r
= kvm_s390_inject_vm(kvm
, &s390int
);
159 struct kvm
*kvm_arch_create_vm(void)
165 rc
= s390_enable_sie();
170 kvm
= kzalloc(sizeof(struct kvm
), GFP_KERNEL
);
174 kvm
->arch
.sca
= (struct sca_block
*) get_zeroed_page(GFP_KERNEL
);
178 sprintf(debug_name
, "kvm-%u", current
->pid
);
180 kvm
->arch
.dbf
= debug_register(debug_name
, 8, 2, 8 * sizeof(long));
184 spin_lock_init(&kvm
->arch
.float_int
.lock
);
185 INIT_LIST_HEAD(&kvm
->arch
.float_int
.list
);
187 debug_register_view(kvm
->arch
.dbf
, &debug_sprintf_view
);
188 VM_EVENT(kvm
, 3, "%s", "vm created");
192 free_page((unsigned long)(kvm
->arch
.sca
));
199 void kvm_arch_vcpu_destroy(struct kvm_vcpu
*vcpu
)
201 VCPU_EVENT(vcpu
, 3, "%s", "free cpu");
202 if (vcpu
->kvm
->arch
.sca
->cpu
[vcpu
->vcpu_id
].sda
==
203 (__u64
) vcpu
->arch
.sie_block
)
204 vcpu
->kvm
->arch
.sca
->cpu
[vcpu
->vcpu_id
].sda
= 0;
206 free_page((unsigned long)(vcpu
->arch
.sie_block
));
207 kvm_vcpu_uninit(vcpu
);
211 static void kvm_free_vcpus(struct kvm
*kvm
)
214 struct kvm_vcpu
*vcpu
;
216 kvm_for_each_vcpu(i
, vcpu
, kvm
)
217 kvm_arch_vcpu_destroy(vcpu
);
219 mutex_lock(&kvm
->lock
);
220 for (i
= 0; i
< atomic_read(&kvm
->online_vcpus
); i
++)
221 kvm
->vcpus
[i
] = NULL
;
223 atomic_set(&kvm
->online_vcpus
, 0);
224 mutex_unlock(&kvm
->lock
);
227 void kvm_arch_sync_events(struct kvm
*kvm
)
231 void kvm_arch_destroy_vm(struct kvm
*kvm
)
234 kvm_free_physmem(kvm
);
235 free_page((unsigned long)(kvm
->arch
.sca
));
236 debug_unregister(kvm
->arch
.dbf
);
240 /* Section: vcpu related */
241 int kvm_arch_vcpu_init(struct kvm_vcpu
*vcpu
)
246 void kvm_arch_vcpu_uninit(struct kvm_vcpu
*vcpu
)
/*
 * Called by common KVM code when this vcpu is scheduled onto a host cpu:
 * stash the host's FP and access registers, then install the guest's.
 * NOTE(review): the surrounding braces were lost in this extract.
 */
251 void kvm_arch_vcpu_load(struct kvm_vcpu
*vcpu
, int cpu
)
/* save host FP state so it can be restored in kvm_arch_vcpu_put() */
253 save_fp_regs(&vcpu
->arch
.host_fpregs
);
254 save_access_regs(vcpu
->arch
.host_acrs
);
/* strip invalid bits from the guest FPC before loading it */
255 vcpu
->arch
.guest_fpregs
.fpc
&= FPC_VALID_MASK
;
256 restore_fp_regs(&vcpu
->arch
.guest_fpregs
);
257 restore_access_regs(vcpu
->arch
.guest_acrs
);
/*
 * Counterpart of kvm_arch_vcpu_load(): called when the vcpu is descheduled.
 * Save the guest's FP/access registers and restore the host state saved
 * in kvm_arch_vcpu_load().
 * NOTE(review): the surrounding braces were lost in this extract.
 */
260 void kvm_arch_vcpu_put(struct kvm_vcpu
*vcpu
)
262 save_fp_regs(&vcpu
->arch
.guest_fpregs
);
263 save_access_regs(vcpu
->arch
.guest_acrs
);
264 restore_fp_regs(&vcpu
->arch
.host_fpregs
);
265 restore_access_regs(vcpu
->arch
.host_acrs
);
/*
 * Reset the vcpu's architectural state to the "initial CPU reset" state
 * defined by the z/Architecture Principles of Operation (without switching
 * the guest to ESA mode): clear PSW, prefix, timers and control registers,
 * then set the architecturally defined reset values for cr0/cr14 and the
 * FP control.
 * NOTE(review): the surrounding braces were lost in this extract.
 */
268 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu
*vcpu
)
270 /* this equals initial cpu reset in pop, but we don't switch to ESA */
271 vcpu
->arch
.sie_block
->gpsw
.mask
= 0UL;
272 vcpu
->arch
.sie_block
->gpsw
.addr
= 0UL;
273 vcpu
->arch
.sie_block
->prefix
= 0UL;
/* 0xffff invalidates the cached intercept handling cpu */
274 vcpu
->arch
.sie_block
->ihcpu
= 0xffff;
275 vcpu
->arch
.sie_block
->cputm
= 0UL;
276 vcpu
->arch
.sie_block
->ckc
= 0UL;
277 vcpu
->arch
.sie_block
->todpr
= 0;
/* wipe all 16 guest control registers ... */
278 memset(vcpu
->arch
.sie_block
->gcr
, 0, 16 * sizeof(__u64
));
/* ... then apply the architected reset values for cr0 and cr14 */
279 vcpu
->arch
.sie_block
->gcr
[0] = 0xE0UL
;
280 vcpu
->arch
.sie_block
->gcr
[14] = 0xC2000000UL
;
281 vcpu
->arch
.guest_fpregs
.fpc
= 0;
/* load the cleared FPC into the hardware FP control register */
282 asm volatile("lfpc %0" : : "Q" (vcpu
->arch
.guest_fpregs
.fpc
));
283 vcpu
->arch
.sie_block
->gbea
= 1;
/*
 * One-time setup of a freshly created vcpu: initialize its SIE control
 * block (mode flags, execution controls, facility list pointer), its
 * clock-comparator hrtimer/tasklet pair, and its reported CPU id.
 * NOTE(review): the surrounding braces and return statement were lost in
 * this extract.
 */
286 int kvm_arch_vcpu_setup(struct kvm_vcpu
*vcpu
)
/* guest runs in z/Architecture mode */
288 atomic_set(&vcpu
->arch
.sie_block
->cpuflags
, CPUSTAT_ZARCH
);
/* force a memory (re)registration before the first run */
289 set_bit(KVM_REQ_MMU_RELOAD
, &vcpu
->requests
);
290 vcpu
->arch
.sie_block
->ecb
= 2;
291 vcpu
->arch
.sie_block
->eca
= 0xC1002001U
;
/* point the SIE block at the masked facility list page (see facilities) */
292 vcpu
->arch
.sie_block
->fac
= (int) (long) facilities
;
/* clock-comparator wakeup: hrtimer fires, tasklet delivers to the vcpu */
293 hrtimer_init(&vcpu
->arch
.ckc_timer
, CLOCK_REALTIME
, HRTIMER_MODE_ABS
);
294 tasklet_init(&vcpu
->arch
.tasklet
, kvm_s390_tasklet
,
295 (unsigned long) vcpu
);
296 vcpu
->arch
.ckc_timer
.function
= kvm_s390_idle_wakeup
;
297 get_cpu_id(&vcpu
->arch
.cpu_id
);
/* report a KVM-specific version code to the guest */
298 vcpu
->arch
.cpu_id
.version
= 0xff;
302 struct kvm_vcpu
*kvm_arch_vcpu_create(struct kvm
*kvm
,
305 struct kvm_vcpu
*vcpu
= kzalloc(sizeof(struct kvm_vcpu
), GFP_KERNEL
);
311 vcpu
->arch
.sie_block
= (struct kvm_s390_sie_block
*)
312 get_zeroed_page(GFP_KERNEL
);
314 if (!vcpu
->arch
.sie_block
)
317 vcpu
->arch
.sie_block
->icpua
= id
;
318 BUG_ON(!kvm
->arch
.sca
);
319 if (!kvm
->arch
.sca
->cpu
[id
].sda
)
320 kvm
->arch
.sca
->cpu
[id
].sda
= (__u64
) vcpu
->arch
.sie_block
;
321 vcpu
->arch
.sie_block
->scaoh
= (__u32
)(((__u64
)kvm
->arch
.sca
) >> 32);
322 vcpu
->arch
.sie_block
->scaol
= (__u32
)(__u64
)kvm
->arch
.sca
;
324 spin_lock_init(&vcpu
->arch
.local_int
.lock
);
325 INIT_LIST_HEAD(&vcpu
->arch
.local_int
.list
);
326 vcpu
->arch
.local_int
.float_int
= &kvm
->arch
.float_int
;
327 spin_lock(&kvm
->arch
.float_int
.lock
);
328 kvm
->arch
.float_int
.local_int
[id
] = &vcpu
->arch
.local_int
;
329 init_waitqueue_head(&vcpu
->arch
.local_int
.wq
);
330 vcpu
->arch
.local_int
.cpuflags
= &vcpu
->arch
.sie_block
->cpuflags
;
331 spin_unlock(&kvm
->arch
.float_int
.lock
);
333 rc
= kvm_vcpu_init(vcpu
, kvm
, id
);
336 VM_EVENT(kvm
, 3, "create cpu %d at %p, sie block at %p", id
, vcpu
,
337 vcpu
->arch
.sie_block
);
346 int kvm_arch_vcpu_runnable(struct kvm_vcpu
*vcpu
)
348 /* kvm common code refers to this, but never calls it */
353 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu
*vcpu
)
356 kvm_s390_vcpu_initial_reset(vcpu
);
361 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu
*vcpu
, struct kvm_regs
*regs
)
364 memcpy(&vcpu
->arch
.guest_gprs
, ®s
->gprs
, sizeof(regs
->gprs
));
369 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu
*vcpu
, struct kvm_regs
*regs
)
372 memcpy(®s
->gprs
, &vcpu
->arch
.guest_gprs
, sizeof(regs
->gprs
));
377 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu
*vcpu
,
378 struct kvm_sregs
*sregs
)
381 memcpy(&vcpu
->arch
.guest_acrs
, &sregs
->acrs
, sizeof(sregs
->acrs
));
382 memcpy(&vcpu
->arch
.sie_block
->gcr
, &sregs
->crs
, sizeof(sregs
->crs
));
387 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu
*vcpu
,
388 struct kvm_sregs
*sregs
)
391 memcpy(&sregs
->acrs
, &vcpu
->arch
.guest_acrs
, sizeof(sregs
->acrs
));
392 memcpy(&sregs
->crs
, &vcpu
->arch
.sie_block
->gcr
, sizeof(sregs
->crs
));
397 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu
*vcpu
, struct kvm_fpu
*fpu
)
400 memcpy(&vcpu
->arch
.guest_fpregs
.fprs
, &fpu
->fprs
, sizeof(fpu
->fprs
));
401 vcpu
->arch
.guest_fpregs
.fpc
= fpu
->fpc
;
406 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu
*vcpu
, struct kvm_fpu
*fpu
)
409 memcpy(&fpu
->fprs
, &vcpu
->arch
.guest_fpregs
.fprs
, sizeof(fpu
->fprs
));
410 fpu
->fpc
= vcpu
->arch
.guest_fpregs
.fpc
;
415 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu
*vcpu
, psw_t psw
)
420 if (atomic_read(&vcpu
->arch
.sie_block
->cpuflags
) & CPUSTAT_RUNNING
)
423 vcpu
->arch
.sie_block
->gpsw
= psw
;
428 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu
*vcpu
,
429 struct kvm_translation
*tr
)
431 return -EINVAL
; /* not implemented yet */
434 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu
*vcpu
,
435 struct kvm_guest_debug
*dbg
)
437 return -EINVAL
; /* not implemented yet */
440 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu
*vcpu
,
441 struct kvm_mp_state
*mp_state
)
443 return -EINVAL
; /* not implemented yet */
446 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu
*vcpu
,
447 struct kvm_mp_state
*mp_state
)
449 return -EINVAL
; /* not implemented yet */
452 static void __vcpu_run(struct kvm_vcpu
*vcpu
)
454 memcpy(&vcpu
->arch
.sie_block
->gg14
, &vcpu
->arch
.guest_gprs
[14], 16);
459 if (test_thread_flag(TIF_MCCK_PENDING
))
462 kvm_s390_deliver_pending_interrupts(vcpu
);
464 vcpu
->arch
.sie_block
->icptcode
= 0;
468 VCPU_EVENT(vcpu
, 6, "entering sie flags %x",
469 atomic_read(&vcpu
->arch
.sie_block
->cpuflags
));
470 if (sie64a(vcpu
->arch
.sie_block
, vcpu
->arch
.guest_gprs
)) {
471 VCPU_EVENT(vcpu
, 3, "%s", "fault in sie instruction");
472 kvm_s390_inject_program_int(vcpu
, PGM_ADDRESSING
);
474 VCPU_EVENT(vcpu
, 6, "exit sie icptcode %d",
475 vcpu
->arch
.sie_block
->icptcode
);
480 memcpy(&vcpu
->arch
.guest_gprs
[14], &vcpu
->arch
.sie_block
->gg14
, 16);
483 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu
*vcpu
, struct kvm_run
*kvm_run
)
492 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD
, &vcpu
->requests
))
493 kvm_s390_vcpu_set_mem(vcpu
);
495 /* verify, that memory has been registered */
496 if (!vcpu
->arch
.sie_block
->gmslm
) {
498 VCPU_EVENT(vcpu
, 3, "%s", "no memory registered to run vcpu");
502 if (vcpu
->sigset_active
)
503 sigprocmask(SIG_SETMASK
, &vcpu
->sigset
, &sigsaved
);
505 atomic_set_mask(CPUSTAT_RUNNING
, &vcpu
->arch
.sie_block
->cpuflags
);
507 BUG_ON(vcpu
->kvm
->arch
.float_int
.local_int
[vcpu
->vcpu_id
] == NULL
);
509 switch (kvm_run
->exit_reason
) {
510 case KVM_EXIT_S390_SIEIC
:
511 vcpu
->arch
.sie_block
->gpsw
.mask
= kvm_run
->s390_sieic
.mask
;
512 vcpu
->arch
.sie_block
->gpsw
.addr
= kvm_run
->s390_sieic
.addr
;
514 case KVM_EXIT_UNKNOWN
:
516 case KVM_EXIT_S390_RESET
:
526 rc
= kvm_handle_sie_intercept(vcpu
);
527 } while (!signal_pending(current
) && !rc
);
529 if (rc
== SIE_INTERCEPT_RERUNVCPU
)
532 if (signal_pending(current
) && !rc
) {
533 kvm_run
->exit_reason
= KVM_EXIT_INTR
;
537 if (rc
== -ENOTSUPP
) {
538 /* intercept cannot be handled in-kernel, prepare kvm-run */
539 kvm_run
->exit_reason
= KVM_EXIT_S390_SIEIC
;
540 kvm_run
->s390_sieic
.icptcode
= vcpu
->arch
.sie_block
->icptcode
;
541 kvm_run
->s390_sieic
.mask
= vcpu
->arch
.sie_block
->gpsw
.mask
;
542 kvm_run
->s390_sieic
.addr
= vcpu
->arch
.sie_block
->gpsw
.addr
;
543 kvm_run
->s390_sieic
.ipa
= vcpu
->arch
.sie_block
->ipa
;
544 kvm_run
->s390_sieic
.ipb
= vcpu
->arch
.sie_block
->ipb
;
548 if (rc
== -EREMOTE
) {
549 /* intercept was handled, but userspace support is needed
550 * kvm_run has been prepared by the handler */
554 if (vcpu
->sigset_active
)
555 sigprocmask(SIG_SETMASK
, &sigsaved
, NULL
);
559 vcpu
->stat
.exit_userspace
++;
563 static int __guestcopy(struct kvm_vcpu
*vcpu
, u64 guestdest
, const void *from
,
564 unsigned long n
, int prefix
)
567 return copy_to_guest(vcpu
, guestdest
, from
, n
);
569 return copy_to_guest_absolute(vcpu
, guestdest
, from
, n
);
573 * store status at address
574 * we use have two special cases:
575 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
576 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
578 int __kvm_s390_vcpu_store_status(struct kvm_vcpu
*vcpu
, unsigned long addr
)
580 const unsigned char archmode
= 1;
583 if (addr
== KVM_S390_STORE_STATUS_NOADDR
) {
584 if (copy_to_guest_absolute(vcpu
, 163ul, &archmode
, 1))
586 addr
= SAVE_AREA_BASE
;
588 } else if (addr
== KVM_S390_STORE_STATUS_PREFIXED
) {
589 if (copy_to_guest(vcpu
, 163ul, &archmode
, 1))
591 addr
= SAVE_AREA_BASE
;
596 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area_s390x
, fp_regs
),
597 vcpu
->arch
.guest_fpregs
.fprs
, 128, prefix
))
600 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area_s390x
, gp_regs
),
601 vcpu
->arch
.guest_gprs
, 128, prefix
))
604 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area_s390x
, psw
),
605 &vcpu
->arch
.sie_block
->gpsw
, 16, prefix
))
608 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area_s390x
, pref_reg
),
609 &vcpu
->arch
.sie_block
->prefix
, 4, prefix
))
612 if (__guestcopy(vcpu
,
613 addr
+ offsetof(struct save_area_s390x
, fp_ctrl_reg
),
614 &vcpu
->arch
.guest_fpregs
.fpc
, 4, prefix
))
617 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area_s390x
, tod_reg
),
618 &vcpu
->arch
.sie_block
->todpr
, 4, prefix
))
621 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area_s390x
, timer
),
622 &vcpu
->arch
.sie_block
->cputm
, 8, prefix
))
625 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area_s390x
, clk_cmp
),
626 &vcpu
->arch
.sie_block
->ckc
, 8, prefix
))
629 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area_s390x
, acc_regs
),
630 &vcpu
->arch
.guest_acrs
, 64, prefix
))
633 if (__guestcopy(vcpu
,
634 addr
+ offsetof(struct save_area_s390x
, ctrl_regs
),
635 &vcpu
->arch
.sie_block
->gcr
, 128, prefix
))
640 static int kvm_s390_vcpu_store_status(struct kvm_vcpu
*vcpu
, unsigned long addr
)
645 rc
= __kvm_s390_vcpu_store_status(vcpu
, addr
);
650 long kvm_arch_vcpu_ioctl(struct file
*filp
,
651 unsigned int ioctl
, unsigned long arg
)
653 struct kvm_vcpu
*vcpu
= filp
->private_data
;
654 void __user
*argp
= (void __user
*)arg
;
657 case KVM_S390_INTERRUPT
: {
658 struct kvm_s390_interrupt s390int
;
660 if (copy_from_user(&s390int
, argp
, sizeof(s390int
)))
662 return kvm_s390_inject_vcpu(vcpu
, &s390int
);
664 case KVM_S390_STORE_STATUS
:
665 return kvm_s390_vcpu_store_status(vcpu
, arg
);
666 case KVM_S390_SET_INITIAL_PSW
: {
669 if (copy_from_user(&psw
, argp
, sizeof(psw
)))
671 return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu
, psw
);
673 case KVM_S390_INITIAL_RESET
:
674 return kvm_arch_vcpu_ioctl_initial_reset(vcpu
);
681 /* Section: memory related */
682 int kvm_arch_set_memory_region(struct kvm
*kvm
,
683 struct kvm_userspace_memory_region
*mem
,
684 struct kvm_memory_slot old
,
688 struct kvm_vcpu
*vcpu
;
690 /* A few sanity checks. We can have exactly one memory slot which has
691 to start at guest virtual zero and which has to be located at a
692 page boundary in userland and which has to end at a page boundary.
693 The memory in userland is ok to be fragmented into various different
694 vmas. It is okay to mmap() and munmap() stuff in this slot after
695 doing this call at any time */
700 if (mem
->guest_phys_addr
)
703 if (mem
->userspace_addr
& (PAGE_SIZE
- 1))
706 if (mem
->memory_size
& (PAGE_SIZE
- 1))
712 /* request update of sie control block for all available vcpus */
713 kvm_for_each_vcpu(i
, vcpu
, kvm
) {
714 if (test_and_set_bit(KVM_REQ_MMU_RELOAD
, &vcpu
->requests
))
716 kvm_s390_inject_sigp_stop(vcpu
, ACTION_RELOADVCPU_ON_STOP
);
722 void kvm_arch_flush_shadow(struct kvm
*kvm
)
726 gfn_t
unalias_gfn(struct kvm
*kvm
, gfn_t gfn
)
731 static int __init
kvm_s390_init(void)
734 ret
= kvm_init(NULL
, sizeof(struct kvm_vcpu
), THIS_MODULE
);
739 * guests can ask for up to 255+1 double words, we need a full page
740 * to hold the maximum amount of facilites. On the other hand, we
741 * only set facilities that are known to work in KVM.
743 facilities
= (unsigned long long *) get_zeroed_page(GFP_DMA
);
748 stfle(facilities
, 1);
749 facilities
[0] &= 0xff00fff3f0700000ULL
;
753 static void __exit
kvm_s390_exit(void)
755 free_page((unsigned long) facilities
);
759 module_init(kvm_s390_init
);
760 module_exit(kvm_s390_exit
);