/*
 * interrupt.c - handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
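/*
 * Helpers testing the interrupt masks in the guest PSW: external
 * interrupts can only be delivered while PSW_MASK_EXT is set, and a
 * wait PSW with no interruption class enabled at all is a disabled
 * wait that no interrupt can leave.
 */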
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}
static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}
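/*
 * Can this pending interrupt be delivered right now? External types
 * additionally need their subclass mask bit in guest control
 * register 0; program, stop, set-prefix and restart interrupts are
 * always deliverable.
 */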
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		/* fallthrough */
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		break;
	case KVM_S390_INT_SERVICE:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		break;
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	default:
		BUG();
	}
	return 0;
}
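/*
 * Idle bookkeeping: CPUSTAT_WAIT in the SIE control block plus a bit
 * in the floating interrupt idle_mask, which kvm_s390_inject_vm uses
 * to prefer a sleeping vcpu as the delivery target.
 */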
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
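/*
 * Intercept indicators request an exit from guest context (or prevent
 * reentry) so a still-blocked interrupt can be delivered as soon as
 * the guest becomes enabled for it; LCTL_CR0 additionally intercepts
 * loads of control register 0 so subclass mask changes are noticed.
 */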
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
}
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	default:
		BUG();
	}
}
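/*
 * Deliver one interrupt into the guest: store the interruption code
 * and parameters in the guest lowcore, save the current PSW as the
 * old PSW and load the new PSW of the corresponding interruption
 * class. Any fault on the lowcore is fatal for the guest.
 */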
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc, exception = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->emerg.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_EXTERNAL_CALL:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
		vcpu->stat.deliver_external_call++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->extcall.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
				   inti->ext.ext_params2);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		vcpu->arch.sie_block->prefix = inti->prefix.address;
		vcpu->arch.sie_block->ihcpu = 0xffff;
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
			restart_old_psw), &vcpu->arch.sie_block->gpsw,
			sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		atomic_clear_mask(CPUSTAT_STOPPED,
				  &vcpu->arch.sie_block->cpuflags);
		break;

	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_PGM_ILC,
			table[vcpu->arch.sie_block->ipa >> 14]);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_PGM_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	default:
		BUG();
	}
	if (exception) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
			"delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
}
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc, exception = 0;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
		&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
		__LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	if (exception) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
			"delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
	return 1;
}
static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		get_clock() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}
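/* the clock comparator is handled via kvm_s390_handle_wait instead */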
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}
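/*
 * Called for an enabled wait PSW: sleep until an interrupt becomes
 * deliverable, arming an hrtimer for the clock comparator when the
 * guest has ckc interrupts enabled. A disabled wait is rejected with
 * -EOPNOTSUPP and left to userspace.
 */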
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_clock() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;

	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime),
		      HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns",
		   sltime);
no_timer:
	spin_lock(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
		!signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
		spin_lock(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}
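/*
 * Tasklet half of the timer wakeup: mark the timer as due and wake a
 * vcpu waiting in kvm_s390_handle_wait.
 */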
void kvm_s390_tasklet(unsigned long parm)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

	spin_lock(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->arch.local_int.wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock(&vcpu->arch.local_int.lock);
}
/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}
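/*
 * Walk the local and floating interrupt lists and deliver every
 * interrupt that is deliverable right now; for the rest, set
 * intercept indicators so delivery is retried once the guest enables
 * the corresponding interruption class.
 */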
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if ((vcpu->arch.sie_block->ckc <
		get_clock() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}
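/*
 * Inject a program check from kernel context. The new interrupt is
 * queued at the head of the local list, ahead of anything already
 * pending.
 */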
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(&li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}
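/*
 * Inject a floating (machine-wide) interrupt: queue it on the
 * floating list and kick one vcpu, preferring an idle one, otherwise
 * round-robin over the existing vcpus.
 */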
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti;
	int sigcpu;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_add_tail(&inti->list, &fi->list);
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (fi->local_int[sigcpu] == NULL);
	}
	li = fi->local_int[sigcpu];
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return 0;
}
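/*
 * Inject a local interrupt into a single vcpu on behalf of userspace;
 * program checks jump to the head of the queue, all other types are
 * appended in order.
 */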
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}