/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

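/*
 * Helpers to check interruptibility from the guest PSW: PSW_MASK_EXT
 * gates external interrupts, and a PSW with the PER, I/O and external
 * bits all clear is treated as a disabled wait.
 */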
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
                return 0;
        return 1;
}

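/*
 * An external interrupt is only deliverable if its subclass mask bit in
 * guest control register 0 is set: 0x2000 external call, 0x4000
 * emergency signal, 0x200 service signal (also used for virtio).
 * Program, stop, set-prefix and restart interrupts are always
 * deliverable.
 */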
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EXTERNAL_CALL:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
                        return 1;
                break;
        case KVM_S390_INT_EMERGENCY:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
                        return 1;
                break;
        case KVM_S390_INT_SERVICE:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                break;
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                break;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_RESTART:
                return 1;
        default:
                BUG();
        }
        return 0;
}

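/*
 * Mark a vcpu as idle in the floating interrupt structure's idle_mask,
 * so kvm_s390_inject_vm() can prefer a waiting cpu as delivery target.
 */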
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_ECALL_PEND |
                CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

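/*
 * For a pending interrupt that is currently not deliverable, arm the
 * intercept that fires once the guest could enable it: an external
 * interrupt intercept while the PSW masks external interrupts,
 * otherwise an intercept on loads of control register 0 (LCTL_CR0).
 */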
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EXTERNAL_CALL:
        case KVM_S390_INT_EMERGENCY:
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR0;
                break;
        case KVM_S390_SIGP_STOP:
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
                break;
        default:
                BUG();
        }
}

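/*
 * Deliver one interrupt to the guest the way the hardware would: store
 * the interruption code and parameters in the guest lowcore, save the
 * current PSW as the old PSW and load the new PSW of the corresponding
 * interruption class.
 */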
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_interrupt_info *inti)
{
        /* instruction length code, indexed by the opcode format bits */
        const unsigned short table[] = { 2, 4, 4, 6 };
        int rc, exception = 0;

        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
                vcpu->stat.deliver_emergency_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->emerg.code, 0);
                /* 0x1201: emergency-signal external interruption code */
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                     __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_EXTERNAL_CALL:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
                vcpu->stat.deliver_external_call++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->extcall.code, 0);
                /* 0x1202: external-call external interruption code */
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                     __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_SERVICE:
                VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                           inti->ext.ext_params);
                vcpu->stat.deliver_service_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->ext.ext_params, 0);
                /* 0x2401: service-signal external interruption code */
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                     __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_VIRTIO:
                VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->ext.ext_params,
                                                 inti->ext.ext_params2);
                /* 0x2603: external interruption code used for virtio */
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                     __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
                                   inti->ext.ext_params2);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_SIGP_STOP:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
                vcpu->stat.deliver_stop_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 0, 0);
                __set_intercept_indicator(vcpu, inti);
                break;

        case KVM_S390_SIGP_SET_PREFIX:
                VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
                           inti->prefix.address);
                vcpu->stat.deliver_prefix_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->prefix.address, 0);
                kvm_s390_set_prefix(vcpu, inti->prefix.address);
                break;

        case KVM_S390_RESTART:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
                vcpu->stat.deliver_restart_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 0, 0);
                rc = copy_to_guest(vcpu,
                                   offsetof(struct _lowcore, restart_old_psw),
                                   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                     offsetof(struct _lowcore, restart_psw),
                                     sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                atomic_clear_mask(CPUSTAT_STOPPED,
                                  &vcpu->arch.sie_block->cpuflags);
                break;

        case KVM_S390_PROGRAM_INT:
                VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                           inti->pgm.code,
                           table[vcpu->arch.sie_block->ipa >> 14]);
                vcpu->stat.deliver_program_int++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->pgm.code, 0);
                rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_PGM_ILC,
                                   table[vcpu->arch.sie_block->ipa >> 14]);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
                                   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                     __LC_PGM_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        default:
                BUG();
        }
        if (exception) {
                printk("kvm: The guest lowcore is not mapped during interrupt "
                       "delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
}

static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
        int rc, exception = 0;

        if (psw_extint_disabled(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
        if (exception) {
                printk("kvm: The guest lowcore is not mapped during interrupt "
                       "delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
        return 1;
}

static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        if (atomic_read(&li->active)) {
                spin_lock_bh(&li->lock);
                list_for_each_entry(inti, &li->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock_bh(&li->lock);
        }

        if ((!rc) && atomic_read(&fi->active)) {
                spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock(&fi->lock);
        }

        if ((!rc) && (vcpu->arch.sie_block->ckc <
                      get_clock() + vcpu->arch.sie_block->epoch)) {
                if ((!psw_extint_disabled(vcpu)) &&
                    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
                        rc = 1;
        }

        return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
}

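/*
 * Enabled wait: block the vcpu until an interrupt arrives or the clock
 * comparator fires. The sleep time converts TOD clock units to
 * nanoseconds; (delta * 125) >> 9 equals delta * 1000 / 4096.
 */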
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 now, sltime;
        DECLARE_WAITQUEUE(wait, current);

        vcpu->stat.exit_wait_state++;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;

        __set_cpu_idle(vcpu);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 0;
        spin_unlock_bh(&vcpu->arch.local_int.lock);

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                __unset_cpu_idle(vcpu);
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (psw_extint_disabled(vcpu) ||
            (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                goto no_timer;
        }

        now = get_clock() + vcpu->arch.sie_block->epoch;
        if (vcpu->arch.sie_block->ckc < now) {
                __unset_cpu_idle(vcpu);
                return 0;
        }

        /* TOD clock units to nanoseconds: multiply by 125/512 */
        sltime = ((vcpu->arch.sie_block->ckc - now) * 125) >> 9;

        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime),
                      HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns",
                   sltime);
no_timer:
        spin_lock(&vcpu->arch.local_int.float_int->lock);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        add_wait_queue(&vcpu->arch.local_int.wq, &wait);
        while (list_empty(&vcpu->arch.local_int.list) &&
               list_empty(&vcpu->arch.local_int.float_int->list) &&
               (!vcpu->arch.local_int.timer_due) &&
               !signal_pending(current)) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_bh(&vcpu->arch.local_int.lock);
                spin_unlock(&vcpu->arch.local_int.float_int->lock);
                schedule();
                spin_lock(&vcpu->arch.local_int.float_int->lock);
                spin_lock_bh(&vcpu->arch.local_int.lock);
        }
        __unset_cpu_idle(vcpu);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
        spin_unlock(&vcpu->arch.local_int.float_int->lock);
        hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

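/*
 * Runs as a tasklet on behalf of the hrtimer below: mark the wakeup as
 * due and wake the vcpu if it sleeps in kvm_s390_handle_wait().
 */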
void kvm_s390_tasklet(unsigned long parm)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

        spin_lock(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 1;
        if (waitqueue_active(&vcpu->arch.local_int.wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
        spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

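/*
 * Walk the local and floating lists, delivering each deliverable
 * interrupt and arming intercept indicators for the rest. Entries are
 * unlinked under the list lock but delivered after it is dropped,
 * since delivery writes to guest memory.
 */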
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info *n, *inti = NULL;
        int deliver;

        __reset_intercept_indicators(vcpu);
        if (atomic_read(&li->active)) {
                do {
                        deliver = 0;
                        spin_lock_bh(&li->lock);
                        list_for_each_entry_safe(inti, n, &li->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&li->list))
                                atomic_set(&li->active, 0);
                        spin_unlock_bh(&li->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }

        if ((vcpu->arch.sie_block->ckc <
             get_clock() + vcpu->arch.sie_block->epoch))
                __try_deliver_ckc_interrupt(vcpu);

        if (atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock(&fi->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }
}

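/*
 * Inject a program check from kernel context; the caller is the vcpu
 * thread itself, hence the BUG_ON on an active wait queue.
 */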
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_PROGRAM_INT;
        inti->pgm.code = code;

        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
        spin_lock_bh(&li->lock);
        list_add(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        BUG_ON(waitqueue_active(&li->wq));
        spin_unlock_bh(&li->lock);
        return 0;
}

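/*
 * Queue a floating (vm-wide) interrupt and kick a delivery target:
 * preferably an idle vcpu from idle_mask, otherwise the next existing
 * vcpu in round-robin order.
 */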
int kvm_s390_inject_vm(struct kvm *kvm,
                       struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti;
        int sigcpu;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_INT_VIRTIO:
                VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
                         s390int->parm, s390int->parm64);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                inti->ext.ext_params2 = s390int->parm64;
                break;
        case KVM_S390_INT_SERVICE:
                VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                break;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_INT_EXTERNAL_CALL:
        case KVM_S390_INT_EMERGENCY:
        default:
                /* these are per-vcpu interrupts; reject them here */
                kfree(inti);
                return -EINVAL;
        }
        trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
                                 2);

        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        list_add_tail(&inti->list, &fi->list);
        atomic_set(&fi->active, 1);
        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
        if (sigcpu == KVM_MAX_VCPUS) {
                do {
                        sigcpu = fi->next_rr_cpu++;
                        if (sigcpu == KVM_MAX_VCPUS)
                                sigcpu = fi->next_rr_cpu = 0;
                } while (fi->local_int[sigcpu] == NULL);
        }
        li = fi->local_int[sigcpu];
        spin_lock_bh(&li->lock);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
        return 0;
}

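/*
 * Queue an interrupt on one vcpu's local list. Program checks are
 * added at the head so they are delivered first; all other types are
 * appended in arrival order.
 */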
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                         struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_PROGRAM_INT:
                if (s390int->parm & 0xffff0000) {
                        /* program interruption codes are 16 bit only */
                        kfree(inti);
                        return -EINVAL;
                }
                inti->type = s390int->type;
                inti->pgm.code = s390int->parm;
                VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_SET_PREFIX:
                inti->prefix.address = s390int->parm;
                inti->type = s390int->type;
                VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_STOP:
        case KVM_S390_RESTART:
        case KVM_S390_INT_EXTERNAL_CALL:
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
                inti->type = s390int->type;
                break;
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
        default:
                /* these are floating interrupts; use kvm_s390_inject_vm */
                kfree(inti);
                return -EINVAL;
        }
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
                                   s390int->parm64, 2);

        mutex_lock(&vcpu->kvm->lock);
        li = &vcpu->arch.local_int;
        spin_lock_bh(&li->lock);
        if (inti->type == KVM_S390_PROGRAM_INT)
                list_add(&inti->list, &li->list);
        else
                list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (inti->type == KVM_S390_SIGP_STOP)
                li->action_bits |= ACTION_STOP_ON_STOP;
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
        spin_unlock_bh(&li->lock);
        mutex_unlock(&vcpu->kvm->lock);
        return 0;
}