/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000

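/*
 * Non-I/O interrupt types all live in the 0xfffe0000 range; everything
 * else encodes a subchannel in the type and is an I/O interrupt.
 */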
static int is_ioint(u64 type)
{
	return ((type & 0xfffe0000u) != 0xfffe0000u);
}

static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

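/*
 * Extract the interruption subclass (ISC) from the interruption word
 * and return it as a mask aligned to the ISC bits of guest CR6.
 */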
static u64 int_word_to_isc_bits(u32 int_word)
{
	u8 isc = (int_word & 0x38000000) >> 27;

	return (0x80 >> isc) << 24;
}

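/*
 * Decide whether an interrupt can be delivered to the guest right now,
 * based on the PSW mask and the subclass mask bits in the guest's
 * control registers.
 */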
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		return 0;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		return 0;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
			  CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~ICTL_LPSW;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

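/*
 * Request an intercept for an interrupt that is not yet deliverable:
 * either flag it in the SIE control block cpuflags or intercept loads
 * of the relevant control register (LPSW for machine checks).
 */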
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}

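/*
 * Deliver one interrupt to the guest: write the interruption code and
 * parameters to the guest lowcore, save the old PSW and load the new
 * PSW of the corresponding interruption class.
 */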
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->emerg.code, 0);
		rc  = put_guest(vcpu, 0x1201, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, inti->emerg.code,
				(u16 __user *)__LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
		vcpu->stat.deliver_external_call++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->extcall.code, 0);
		rc  = put_guest(vcpu, 0x1202, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, inti->extcall.code,
				(u16 __user *)__LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		break;
	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc  = put_guest(vcpu, 0x2401, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		rc |= put_guest(vcpu, inti->ext.ext_params,
				(u32 __user *)__LC_EXT_PARAMS);
		break;
	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		rc  = put_guest(vcpu, 0x2603, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, 0x0d00, (u16 __user *)__LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		rc |= put_guest(vcpu, inti->ext.ext_params,
				(u32 __user *)__LC_EXT_PARAMS);
		rc |= put_guest(vcpu, inti->ext.ext_params2,
				(u64 __user *)__LC_EXT_PARAMS2);
		break;
	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->prefix.address, 0);
		kvm_s390_set_prefix(vcpu, inti->prefix.address);
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		rc  = copy_to_guest(vcpu,
				    offsetof(struct _lowcore, restart_old_psw),
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      offsetof(struct _lowcore, restart_psw),
				      sizeof(psw_t));
		atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
		break;
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->pgm.code, 0);
		rc  = put_guest(vcpu, inti->pgm.code, (u16 __user *)__LC_PGM_INT_CODE);
		rc |= put_guest(vcpu, table[vcpu->arch.sie_block->ipa >> 14],
				(u16 __user *)__LC_PGM_ILC);
		rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_PGM_NEW_PSW, sizeof(psw_t));
		break;

	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
			   inti->mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->mchk.cr14,
						 inti->mchk.mcic);
		rc  = kvm_s390_vcpu_store_status(vcpu,
						 KVM_S390_STORE_STATUS_PREFIXED);
		rc |= put_guest(vcpu, inti->mchk.mcic, (u64 __user *) __LC_MCCK_CODE);
		rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_MCK_NEW_PSW, sizeof(psw_t));
		break;

	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	{
		__u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
			inti->io.subchannel_nr;
		__u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
			inti->io.io_int_word;
		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 param0, param1);
		rc  = put_guest(vcpu, inti->io.subchannel_id,
				(u16 __user *) __LC_SUBCHANNEL_ID);
		rc |= put_guest(vcpu, inti->io.subchannel_nr,
				(u16 __user *) __LC_SUBCHANNEL_NR);
		rc |= put_guest(vcpu, inti->io.io_int_parm,
				(u32 __user *) __LC_IO_INT_PARM);
		rc |= put_guest(vcpu, inti->io.io_int_word,
				(u32 __user *) __LC_IO_INT_WORD);
		rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_IO_NEW_PSW, sizeof(psw_t));
		break;
	}
	default:
		BUG();
	}
	if (rc) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
}

static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc  = put_guest(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
	rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			      __LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
	return 1;
}

static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		      get_tod_clock() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

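/*
 * Handle a guest wait state: mark the cpu idle and sleep until an
 * interrupt becomes pending, arming an hrtimer for the clock
 * comparator if it is enabled.
 */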
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_tod_clock() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	spin_lock(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
	       list_empty(&vcpu->arch.local_int.float_int->list) &&
	       (!vcpu->arch.local_int.timer_due) &&
	       !signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
		schedule();
		spin_lock(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

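/*
 * Tasklet handler for the clock comparator hrtimer: flag the timer as
 * due and wake up the vcpu if it is sleeping in kvm_s390_handle_wait.
 */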
void kvm_s390_tasklet(unsigned long parm)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

	spin_lock(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->arch.local_int.wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

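/*
 * Deliver all deliverable pending interrupts, local and floating, and
 * set intercept indicators for those that remain blocked.
 */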
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if ((vcpu->arch.sie_block->ckc <
	     get_tod_clock() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

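/*
 * Same as kvm_s390_deliver_pending_interrupts, but restricted to
 * machine check interrupts (KVM_S390_MCHK).
 */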
void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

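/*
 * Inject a program check from kernel context. The vcpu cannot be
 * sleeping on its wait queue here, hence the BUG_ON below.
 */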
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(&li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}

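/*
 * Dequeue and return the first floating I/O interrupt matching either
 * the isc mask in cr6 or the given subchannel id (exactly one selector
 * must be supplied); returns NULL if there is no match.
 */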
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		if (schid) {
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti)
		list_del_init(&inti->list);
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return inti;
}

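/*
 * Inject a floating interrupt: queue it (I/O interrupts sorted by isc)
 * and kick an idle vcpu, falling back to round-robin over the online
 * vcpus if none is idle.
 */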
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;
	int sigcpu;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
		kfree(inti);
		return -EINVAL;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->type = s390int->type;
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (s390int->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->type = s390int->type;
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (!is_ioint(inti->type))
		list_add_tail(&inti->list, &fi->list);
	else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (fi->local_int[sigcpu] == NULL);
	}
	li = fi->local_int[sigcpu];
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return 0;
}

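/*
 * Inject a local interrupt into a single vcpu; program checks are
 * queued at the head of the local list, everything else is appended
 * in order.
 */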
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
			   s390int->parm);
		inti->type = s390int->type;
		inti->extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
		inti->type = s390int->type;
		inti->emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
			   s390int->parm64);
		inti->type = s390int->type;
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
				   s390int->parm64, 2);

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}