/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
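
/*
 * All non-I/O interrupt types carry 0xfffe in the upper type bits; I/O
 * interrupt types instead encode cssid/ssid/schid (or the adapter
 * interrupt bit) there, per the IOINT_* masks above.
 */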
static int is_ioint(u64 type)
{
	return ((type & 0xfffe0000u) != 0xfffe0000u);
}

static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}
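
/*
 * Extract the interruption subclass (ISC) from an I/O interruption word
 * and turn it into the bit position CR6 uses to mask that subclass.
 */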
static u64 int_word_to_isc_bits(u32 int_word)
{
	u8 isc = (int_word & 0x38000000) >> 27;

	return (0x80 >> isc) << 24;
}
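
/*
 * An interrupt is deliverable only when two guest controls allow it: the
 * class mask bit in the PSW and the subclass mask in the matching control
 * register (CR0 for externals, CR6 for I/O, CR14 for machine checks).
 */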
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		break;
	case KVM_S390_INT_SERVICE:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		break;
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		break;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~ICTL_LPSW;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
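
/*
 * The interrupt cannot be delivered while the guest masks it, so request
 * an intercept for the moment the guest becomes receptive: flag it in
 * cpuflags while the PSW class bit is off, otherwise intercept changes
 * to the control register that guards this interrupt class.
 */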
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}
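
/*
 * Deliver one interrupt by writing its parameters into the guest lowcore,
 * saving the current PSW as the class old PSW and loading the class new
 * PSW, mirroring what the hardware would do. table[] maps the two ilc
 * bits of the instruction parsing field (ipa >> 14) to an instruction
 * length for program interrupts.
 */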
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->emerg.code, 0);
		rc  = put_guest(vcpu, 0x1201, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, inti->emerg.code,
				(u16 __user *)__LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
		vcpu->stat.deliver_external_call++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->extcall.code, 0);
		rc  = put_guest(vcpu, 0x1202, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, inti->extcall.code,
				(u16 __user *)__LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		break;
	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc  = put_guest(vcpu, 0x2401, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		rc |= put_guest(vcpu, inti->ext.ext_params,
				(u32 __user *)__LC_EXT_PARAMS);
		break;
	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		rc  = put_guest(vcpu, 0x2603, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, 0x0d00, (u16 __user *)__LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		rc |= put_guest(vcpu, inti->ext.ext_params,
				(u32 __user *)__LC_EXT_PARAMS);
		rc |= put_guest(vcpu, inti->ext.ext_params2,
				(u64 __user *)__LC_EXT_PARAMS2);
		break;
	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->prefix.address, 0);
		kvm_s390_set_prefix(vcpu, inti->prefix.address);
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		rc  = copy_to_guest(vcpu,
				    offsetof(struct _lowcore, restart_old_psw),
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      offsetof(struct _lowcore, restart_psw),
				      sizeof(psw_t));
		atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
		break;
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->pgm.code, 0);
		rc  = put_guest(vcpu, inti->pgm.code, (u16 __user *)__LC_PGM_INT_CODE);
		rc |= put_guest(vcpu, table[vcpu->arch.sie_block->ipa >> 14],
				(u16 __user *)__LC_PGM_ILC);
		rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_PGM_NEW_PSW, sizeof(psw_t));
		break;

	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
			   inti->mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->mchk.cr14,
						 inti->mchk.mcic);
		rc  = kvm_s390_vcpu_store_status(vcpu,
						 KVM_S390_STORE_STATUS_PREFIXED);
		rc |= put_guest(vcpu, inti->mchk.mcic, (u64 __user *) __LC_MCCK_CODE);
		rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_MCK_NEW_PSW, sizeof(psw_t));
		break;

	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	{
		__u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
			inti->io.subchannel_nr;
		__u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
			inti->io.io_int_word;
		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 param0, param1);
		rc  = put_guest(vcpu, inti->io.subchannel_id,
				(u16 __user *) __LC_SUBCHANNEL_ID);
		rc |= put_guest(vcpu, inti->io.subchannel_nr,
				(u16 __user *) __LC_SUBCHANNEL_NR);
		rc |= put_guest(vcpu, inti->io.io_int_parm,
				(u32 __user *) __LC_IO_INT_PARM);
		rc |= put_guest(vcpu, inti->io.io_int_word,
				(u32 __user *) __LC_IO_INT_WORD);
		rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_IO_NEW_PSW, sizeof(psw_t));
		break;
	}
	default:
		BUG();
	}
	if (rc) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
}
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc  = put_guest(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
	rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			      __LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
	return 1;
}
static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}
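
/*
 * Handle an enabled wait: block the vcpu until an interrupt (or signal)
 * arrives, arming an hrtimer for the clock comparator when the guest has
 * it enabled. A wait with all interrupt classes disabled could never end,
 * so it is reported to userspace instead.
 */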
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	spin_lock(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
		!signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
		schedule();
		spin_lock(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}
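
/*
 * Tasklet half of the clock comparator wakeup: mark the timer as due and
 * wake the vcpu if it is sleeping in kvm_s390_handle_wait().
 */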
void kvm_s390_tasklet(unsigned long parm)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

	spin_lock(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
	spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}
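
/*
 * Deliver everything that became deliverable: drain the local list first,
 * then a possible clock comparator interrupt, then the floating list.
 * Interrupts that stay undeliverable only refresh the intercept
 * indicators.
 */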
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info  *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if ((vcpu->arch.sie_block->ckc <
		get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}
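
/*
 * Same drain as above, restricted to KVM_S390_MCHK interrupts; used on
 * paths where only machine checks may be delivered.
 */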
void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info  *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}
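
/*
 * Inject a program check from kernel context (e.g. from instruction
 * emulation); queued at the head of the local list so it is delivered
 * before any other pending local interrupt.
 */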
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}
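
/*
 * Dequeue a pending floating I/O interrupt, selected either by an ISC
 * mask in cr6 or by a specific subchannel id/nr in schid (exactly one of
 * the two must be given). Returns NULL when nothing matches; otherwise
 * the caller owns and must free the returned element.
 */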
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		if (schid) {
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti)
		list_del_init(&inti->list);
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return inti;
}
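
/*
 * Queue a floating interrupt for the whole VM and kick one vcpu,
 * preferring an idle one and falling back to round-robin. Reached from
 * userspace via KVM_S390_INTERRUPT on the VM fd; a minimal sketch with
 * hypothetical field values (an sclp service signal):
 *
 *	struct kvm_s390_interrupt irq = {
 *		.type = KVM_S390_INT_SERVICE,
 *		.parm = ext_params,
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
 */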
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;
	int sigcpu;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
		kfree(inti);
		return -EINVAL;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->type = s390int->type;
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (s390int->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->type = s390int->type;
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (!is_ioint(inti->type))
		list_add_tail(&inti->list, &fi->list);
	else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (fi->local_int[sigcpu] == NULL);
	}
	li = fi->local_int[sigcpu];
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return 0;
}
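
/*
 * Inject a cpu-local interrupt from userspace (KVM_S390_INTERRUPT on a
 * vcpu fd). Floating-only types are rejected here; program interrupts
 * jump to the head of the queue so they are delivered first.
 */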
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
			   s390int->parm);
		inti->type = s390int->type;
		inti->extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
		inti->type = s390int->type;
		inti->emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
			   s390int->parm64);
		inti->type = s390int->type;
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
				   s390int->parm64, 2);

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}