/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

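/*
 * Each entry above surfaces as a file under the kvm debugfs directory once
 * the module is loaded. Illustrative only (the exact path is an assumption;
 * /sys/kernel/debug is the usual debugfs mount point):
 *
 *	# cat /sys/kernel/debug/kvm/exit_instruction
 */
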
static unsigned long long *facilities;

/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	switch (ext) {
	default:
		return 0;
	}
}

/* Section: vm related */

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -EINVAL;
	}

	return r;
}

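/*
 * Illustrative sketch, not part of the original file: how userspace might
 * drive this path from the vm fd. vm_fd and parm64 are hypothetical
 * placeholders.
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_VIRTIO,
 *		.parm64 = parm64,
 *	};
 *	if (ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int) < 0)
 *		perror("KVM_S390_INTERRUPT");
 */
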
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_destroy(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

/* The current code can have up to 256 pages for virtio */
#define VIRTIODESCSPACE (256ul * 4096ul)

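/*
 * Worked example (illustrative numbers, not from the original source): with
 * guest_origin = 0x80000000 and guest_memsize = 256 MB, a vcpu's window is
 *
 *	gmsor = 0x80000000
 *	gmslm = 0x80000000 + 0x10000000 + (256ul * 4096ul) - 1 = 0x900fffff
 *
 * i.e. the virtio descriptor pages sit directly above guest memory, inside
 * the SIE origin/limit pair set up in kvm_arch_vcpu_setup() below.
 */
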
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
				      vcpu->kvm->arch.guest_origin +
				      VIRTIODESCSPACE - 1ul;
	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	else
		BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_cpu;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else
		vcpu->arch.sie_block->gpsw = psw;
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->kvm->arch.guest_memsize) {
		vcpu_put(vcpu);
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (signal_pending(current) && !rc)
		rc = -EINTR;

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}

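/*
 * Illustrative userspace counterpart, a sketch under assumptions (vcpu_fd
 * and run, the mmap'ed struct kvm_run, are set up elsewhere and are not
 * part of this file):
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *			break;
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			break;	(inspect run->s390_sieic.icptcode/ipa/ipb)
 *	}
 */
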
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

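/*
 * Illustrative sketch, not from the original file: userspace requesting a
 * status store into the prefix-relative save area of a stopped vcpu
 * (vcpu_fd is a hypothetical placeholder):
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_PREFIXED);
 */
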
static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}

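/*
 * Illustrative sketch (assumption, not from this file): a typical bring-up
 * sequence on the vcpu fd. vcpu_fd and start_address are placeholders, and
 * the mask below merely sets the EA/BA bits for 64-bit addressing mode:
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000UL,
 *		.addr = start_address,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */
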
/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int i;

	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot || kvm->arch.guest_memsize)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	/* lock all vcpus */
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (!kvm->vcpus[i])
			continue;
		if (!mutex_trylock(&kvm->vcpus[i]->mutex))
			goto fail_out;
	}

	kvm->arch.guest_origin = mem->userspace_addr;
	kvm->arch.guest_memsize = mem->memory_size;

	/* update sie control blocks, and unlock all vcpus */
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm->vcpus[i]->arch.sie_block->gmsor =
				kvm->arch.guest_origin;
			kvm->vcpus[i]->arch.sie_block->gmslm =
				kvm->arch.guest_memsize +
				kvm->arch.guest_origin +
				VIRTIODESCSPACE - 1ul;
			mutex_unlock(&kvm->vcpus[i]->mutex);
		}
	}

	return 0;

fail_out:
	while (--i >= 0)
		if (kvm->vcpus[i])
			mutex_unlock(&kvm->vcpus[i]->mutex);
	return -EINVAL;
}

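/*
 * Illustrative sketch, not from the original file: the single slot this
 * function accepts. backing and ram_size are hypothetical placeholders and
 * must both be page aligned:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.userspace_addr = (unsigned long) backing,
 *		.memory_size = ram_size,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */
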
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	stfle(facilities, 1);
	facilities[0] &= 0xff00fff3f0700000ULL;
	return 0;
}

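/*
 * Sizing note (illustrative, not from the original source): stfle can
 * report at most 256 doublewords, and 256 * 8 = 2048 bytes, so one zeroed
 * 4K page always suffices; only the first doubleword is retrieved above
 * and then masked down to the facility bits KVM is known to handle.
 */
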
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);