/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"

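/*
 * Helpers that inspect the guest PSW in the SIE control block:
 * psw_extint_disabled() tests whether external interrupts are masked,
 * psw_interrupts_disabled() whether PER, I/O and external interrupts
 * are all masked off.
 */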
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
                return 0;
        return 1;
}

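/*
 * An interrupt is deliverable when its type is not masked: the external
 * interrupt types additionally require the matching subclass mask bit in
 * control register 0; program, stop, set-prefix and restart interrupts
 * are always deliverable.
 */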
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EXTERNAL_CALL:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
                        return 1;
        case KVM_S390_INT_EMERGENCY:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
                        return 1;
                break;
        case KVM_S390_INT_SERVICE:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                break;
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                break;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_RESTART:
                return 1;
        default:
                BUG();
        }
        return 0;
}

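/*
 * Track the vcpu's idle state: CPUSTAT_WAIT in the SIE control block plus
 * the per-cpu bit in the floating-interrupt idle_mask, which
 * kvm_s390_inject_vm() consults when picking an idle target cpu.
 */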
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_ECALL_PEND |
                CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EXTERNAL_CALL:
        case KVM_S390_INT_EMERGENCY:
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR0;
                break;
        case KVM_S390_SIGP_STOP:
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
                break;
        default:
                BUG();
        }
}

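/*
 * Deliver one interrupt: write the interruption code into the guest
 * lowcore, save the current guest PSW as the old PSW and load the new PSW
 * for that interruption class. A fault on the lowcore kills the userspace
 * process, since interrupt delivery cannot be rolled back.
 */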
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_interrupt_info *inti)
{
        const unsigned short table[] = { 2, 4, 4, 6 };
        int rc, exception = 0;

        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
                vcpu->stat.deliver_emergency_signal++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_EXTERNAL_CALL:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
                vcpu->stat.deliver_external_call++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_SERVICE:
                VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                           inti->ext.ext_params);
                vcpu->stat.deliver_service_signal++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_VIRTIO:
                VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
                                   inti->ext.ext_params2);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_SIGP_STOP:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
                vcpu->stat.deliver_stop_signal++;
                __set_intercept_indicator(vcpu, inti);
                break;

        case KVM_S390_SIGP_SET_PREFIX:
                VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
                           inti->prefix.address);
                vcpu->stat.deliver_prefix_signal++;
                kvm_s390_set_prefix(vcpu, inti->prefix.address);
                break;

        case KVM_S390_RESTART:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
                vcpu->stat.deliver_restart_signal++;
                rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
                        restart_old_psw), &vcpu->arch.sie_block->gpsw,
                        sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
                break;

        case KVM_S390_PROGRAM_INT:
                VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                           inti->pgm.code,
                           table[vcpu->arch.sie_block->ipa >> 14]);
                vcpu->stat.deliver_program_int++;
                rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_PGM_ILC,
                        table[vcpu->arch.sie_block->ipa >> 14]);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_PGM_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        default:
                BUG();
        }
        if (exception) {
                printk("kvm: The guest lowcore is not mapped during interrupt "
                        "delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
}

static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
        int rc, exception = 0;

        if (psw_extint_disabled(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
        if (exception) {
                printk("kvm: The guest lowcore is not mapped during interrupt "
                        "delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
        return 1;
}

static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        if (atomic_read(&li->active)) {
                spin_lock_bh(&li->lock);
                list_for_each_entry(inti, &li->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock_bh(&li->lock);
        }

        if ((!rc) && atomic_read(&fi->active)) {
                spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock(&fi->lock);
        }

        if ((!rc) && (vcpu->arch.sie_block->ckc <
                get_clock() + vcpu->arch.sie_block->epoch)) {
                if ((!psw_extint_disabled(vcpu)) &&
                    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
                        rc = 1;
        }

        return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
}

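/*
 * Enabled wait: block the vcpu until an interrupt becomes deliverable,
 * arming an hrtimer for the clock comparator when it is in use. The TOD
 * clock delta is converted to nanoseconds by multiplying with 125 and
 * shifting right by 9 (i.e. * 1000 / 4096).
 */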
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 now, sltime;
        DECLARE_WAITQUEUE(wait, current);

        vcpu->stat.exit_wait_state++;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;

        __set_cpu_idle(vcpu);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 0;
        spin_unlock_bh(&vcpu->arch.local_int.lock);

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                __unset_cpu_idle(vcpu);
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (psw_extint_disabled(vcpu) ||
            (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                goto no_timer;
        }

        now = get_clock() + vcpu->arch.sie_block->epoch;
        if (vcpu->arch.sie_block->ckc < now) {
                __unset_cpu_idle(vcpu);
                return 0;
        }

        /* convert TOD clock units to ns: * 125 / 512 == * 1000 / 4096 */
        sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;

        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime),
                      HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
        spin_lock(&vcpu->arch.local_int.float_int->lock);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        add_wait_queue(&vcpu->arch.local_int.wq, &wait);
        while (list_empty(&vcpu->arch.local_int.list) &&
                list_empty(&vcpu->arch.local_int.float_int->list) &&
                (!vcpu->arch.local_int.timer_due) &&
                !signal_pending(current)) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_bh(&vcpu->arch.local_int.lock);
                spin_unlock(&vcpu->arch.local_int.float_int->lock);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
                spin_lock(&vcpu->arch.local_int.float_int->lock);
                spin_lock_bh(&vcpu->arch.local_int.lock);
        }
        __unset_cpu_idle(vcpu);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
        spin_unlock(&vcpu->arch.local_int.float_int->lock);
        hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

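/*
 * Tasklet half of the ckc timer: mark the timer as due and wake up the
 * vcpu waiting in kvm_s390_handle_wait().
 */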
void kvm_s390_tasklet(unsigned long parm)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

        spin_lock(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 1;
        if (waitqueue_active(&vcpu->arch.local_int.wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
        spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

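/*
 * Deliver everything that became deliverable: first local interrupts,
 * then a possible clock comparator interrupt, then floating interrupts.
 * Interrupts that are not yet deliverable re-arm their intercept
 * indicators instead.
 */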
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info *n, *inti = NULL;
        int deliver;

        __reset_intercept_indicators(vcpu);
        if (atomic_read(&li->active)) {
                do {
                        deliver = 0;
                        spin_lock_bh(&li->lock);
                        list_for_each_entry_safe(inti, n, &li->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&li->list))
                                atomic_set(&li->active, 0);
                        spin_unlock_bh(&li->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }

        if ((vcpu->arch.sie_block->ckc <
                get_clock() + vcpu->arch.sie_block->epoch))
                __try_deliver_ckc_interrupt(vcpu);

        if (atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock(&fi->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }
}

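/*
 * Inject a program check from kernel context; the interrupt is added at
 * the head of the local interrupt list so it is delivered first.
 */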
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_PROGRAM_INT;
        inti->pgm.code = code;

        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
        spin_lock_bh(&li->lock);
        list_add(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        BUG_ON(waitqueue_active(&li->wq));
        spin_unlock_bh(&li->lock);
        return 0;
}

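/*
 * Inject a floating (machine-wide) interrupt and kick a vcpu: prefer an
 * idle cpu from idle_mask, otherwise round-robin via next_rr_cpu.
 */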
int kvm_s390_inject_vm(struct kvm *kvm,
                       struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti;
        int sigcpu;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_INT_VIRTIO:
                VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
                         s390int->parm, s390int->parm64);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                inti->ext.ext_params2 = s390int->parm64;
                break;
        case KVM_S390_INT_SERVICE:
                VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                break;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_INT_EXTERNAL_CALL:
        case KVM_S390_INT_EMERGENCY:
        default:
                kfree(inti);
                return -EINVAL;
        }

        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        list_add_tail(&inti->list, &fi->list);
        atomic_set(&fi->active, 1);
        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
        if (sigcpu == KVM_MAX_VCPUS) {
                do {
                        sigcpu = fi->next_rr_cpu++;
                        if (sigcpu == KVM_MAX_VCPUS)
                                sigcpu = fi->next_rr_cpu = 0;
                } while (fi->local_int[sigcpu] == NULL);
        }
        li = fi->local_int[sigcpu];
        spin_lock_bh(&li->lock);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
        return 0;
}

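/*
 * Inject a vcpu-local interrupt on behalf of userspace; program checks
 * jump the queue (list_add), everything else is appended in order.
 */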
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                         struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_PROGRAM_INT:
                if (s390int->parm & 0xffff0000) {
                        kfree(inti);
                        return -EINVAL;
                }
                inti->type = s390int->type;
                inti->pgm.code = s390int->parm;
                VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_SET_PREFIX:
                inti->prefix.address = s390int->parm;
                inti->type = s390int->type;
                VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_STOP:
        case KVM_S390_RESTART:
        case KVM_S390_INT_EXTERNAL_CALL:
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
                inti->type = s390int->type;
                break;
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
        default:
                kfree(inti);
                return -EINVAL;
        }

        mutex_lock(&vcpu->kvm->lock);
        li = &vcpu->arch.local_int;
        spin_lock_bh(&li->lock);
        if (inti->type == KVM_S390_PROGRAM_INT)
                list_add(&inti->list, &li->list);
        else
                list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (inti->type == KVM_S390_SIGP_STOP)
                li->action_bits |= ACTION_STOP_ON_STOP;
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
        spin_unlock_bh(&li->lock);
        mutex_unlock(&vcpu->kvm->lock);
        return 0;
}