/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
		return 0;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}

static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	return 0;
}
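
/*
 * Note on the cmpxchg() above: with the SIGP interpretation facility the
 * SIE updates the SIGP control word in the SCA entry concurrently with
 * this code, so the pending external call may only be set atomically; a
 * failed compare means another external call was already pending.
 */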

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc, expect;

	if (!kvm_s390_use_sca_entries())
		return;
	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
		return 0;
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static inline int is_ioirq(unsigned long irq_type)
{
	return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
		(irq_type <= IRQ_PEND_IO_ISC_7));
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs |
	       vcpu->arch.local_int.pending_irqs;
}
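
/*
 * isc_to_isc_bits() returns the mask bit for an interruption subclass as
 * it appears in CR6: ISC 0 maps to bit 2^31, ISC 7 to bit 2^24.
 * disable_iscs() below uses it to drop pending I/O interrupts whose
 * subclass is masked off in the guest's CR6.
 */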

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));

	return active_mask;
}

static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	if (!(vcpu->arch.sie_block->gcr[14] &
	      vcpu->kvm->arch.float_int.mchk.cr14))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}
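
/*
 * The CR0 bits tested in deliverable_irqs() are the external-interruption
 * subclass-mask bits: 0x2000 external call, 0x4000 emergency signal,
 * 0x800 clock comparator, 0x400 CPU timer and 0x200 service signal.
 */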

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		      &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}
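
/*
 * The helpers above request interception for interrupts that are pending
 * but cannot be delivered right now: when the PSW masks the class, a
 * cpuflags intercept request makes the SIE exit as soon as the guest
 * reenables it; when only the control-register subclass masks block the
 * interrupt, control-register interception (lctl/ictl) is enabled so that
 * guest changes to the relevant masks are seen.
 */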

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __write_machine_check(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mchk_info *mchk)
{
	unsigned long ext_sa_addr;
	freg_t fprs[NUM_FPRS];
	union mci mci;
	int rc;

	mci.val = mchk->mcic;
	/* take care of lazy register loading via vcpu load/put */
	save_fpu_regs();
	save_access_regs(vcpu->run->s.regs.acrs);

	/* Extended save area */
	rc = read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, &ext_sa_addr,
			   sizeof(unsigned long));
	/* Only bits 0-53 are used for address formation */
	ext_sa_addr &= ~0x3ffUL;
	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
				    512))
			mci.vr = 0;
	} else {
		mci.vr = 0;
	}

	/* General interruption information */
	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

	/* Register-save areas */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
	} else {
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
			     vcpu->run->s.regs.gprs, 128);
	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
			   (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
			   (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
			     &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
			     &vcpu->arch.sie_block->gcr, 128);

	/* Extended interruption information */
	rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
			   (u32 __user *) __LC_EXT_DAMAGE_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
			     sizeof(mchk->fixed_logout));
	return rc ? -EFAULT : 0;
}
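
/*
 * __write_machine_check() mirrors what the hardware does on a machine
 * check: it stores the register save areas and the machine-check
 * interruption code (mcic) into the guest lowcore. The vector registers
 * are only written when the guest has the vector facility (stfle bit 129)
 * and provided a valid extended save area address; otherwise the
 * VR-validity bit in the mcic is cleared.
 */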

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);
		rc = __write_machine_check(vcpu, &mchk);
	}
	return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int cpu_addr;
	int rc;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
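
/*
 * For nullifying program exceptions the PSW still points past the failed
 * instruction, so it is rewound by the instruction length (ilen) before
 * delivery, unless the caller asked for no rewind; the guest then
 * re-executes the instruction after handling the exception.
 */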

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				inti->ext.ext_params,
				inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
		else
			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
				   inti->io.subchannel_id >> 8,
				   inti->io.subchannel_id >> 1 & 0x3,
				   inti->io.subchannel_nr);

		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
				inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
				inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
				   (u16 *)__LC_SUBCHANNEL_ID);
		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
				   (u16 *)__LC_SUBCHANNEL_NR);
		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
				   (u32 *)__LC_IO_INT_PARM);
		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
				   (u32 *)__LC_IO_INT_WORD);
		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		kfree(inti);
	}

	return rc ? -EFAULT : 0;
}

typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
	[IRQ_PEND_EXT_SERVICE]    = __deliver_service,
	[IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
	[IRQ_PEND_VIRTIO]         = __deliver_virtio,
};
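
/*
 * deliver_irq_funcs is indexed by the IRQ_PEND_* bit numbers, which are
 * ordered by interrupt priority; I/O interrupts (IRQ_PEND_IO_ISC_0..7)
 * have no entry here and are routed to __deliver_io() instead.
 */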

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
	u64 now, cputm, sltime = 0;

	if (ckc_interrupts_enabled(vcpu)) {
		now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
		sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
		/* already expired or overflow? */
		if (!sltime || vcpu->arch.sie_block->ckc <= now)
			return 0;
		if (cpu_timer_interrupts_enabled(vcpu)) {
			cputm = kvm_s390_get_cpu_timer(vcpu);
			/* already expired? */
			if (cputm >> 63)
				return 0;
			return min(sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);
		/* already expired? */
		if (sltime >> 63)
			return 0;
	}
	return sltime;
}
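
/*
 * __calculate_sltime() returns the number of nanoseconds until the first
 * enabled wakeup source (clock comparator or CPU timer) fires, or 0 if
 * one of them has already expired and the vcpu must not sleep.
 */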

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu) &&
	    !cpu_timer_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	sltime = __calculate_sltime(vcpu);
	if (!sltime)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	/*
	 * We cannot move this into the if, as the CPU might be already
	 * in kvm_vcpu_block without having the waitqueue set (polling)
	 */
	vcpu->valid_wakeup = true;
	if (swait_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		swake_up(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
	/*
	 * The VCPU might not be sleeping but is executing the VSIE. Let's
	 * kick it, so it leaves the SIE to process the request.
	 */
	kvm_s390_vsie_kick(vcpu);
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	sltime = __calculate_sltime(vcpu);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	sca_clear_ext_call(vcpu);
}

int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	deliver_irq_t func;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the order of interrupt priority */
		irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
		if (is_ioirq(irq_type)) {
			rc = __deliver_io(vcpu, irq_type);
		} else {
			func = deliver_irq_funcs[irq_type];
			if (!func) {
				WARN_ON_ONCE(func == NULL);
				clear_bit(irq_type, &li->pending_irqs);
				continue;
			}
			rc = func(vcpu);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}
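
/*
 * The delivery loop reevaluates deliverable_irqs() on every iteration:
 * find_first_bit() picks the lowest set bit, i.e. the highest priority
 * pending interrupt, and delivering one interrupt (a new PSW is loaded)
 * can change which of the remaining ones are deliverable.
 */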

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
		/* auto detection if no valid ILC was given */
		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
	}

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		li->irq.pgm = irq->u.pgm;
	}

	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif)
		return sca_inject_ext_call(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  int isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}

/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}

#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts after another without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * of servc requests
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}
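
/*
 * Service signals are not queued as separate list entries: they are
 * consolidated into fi->srv_signal, which is why the interrupt info is
 * freed here once its parameters have been merged.
 */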

static int __inject_virtio(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int __inject_pfault_done(struct kvm *kvm,
				 struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	if (inti->type & KVM_S390_INT_IO_AI_MASK)
		VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
	else
		VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
			 inti->io.subchannel_id >> 8,
			 inti->io.subchannel_id >> 1 & 0x3,
			 inti->io.subchannel_nr);
	isc = int_word_to_isc(inti->io.io_int_word);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = fi->next_rr_cpu;
			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (type) {
	case KVM_S390_MCHK:
		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(dst_vcpu);
}
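
/*
 * Floating interrupts have no fixed target vcpu: __floating_irq_kick()
 * prefers an idle (waiting) vcpu and otherwise round-robins over the
 * online vcpus, skipping stopped ones, before raising the matching
 * intercept request on the chosen target.
 */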

static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	u64 type = READ_ONCE(inti->type);
	int rc;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	__floating_irq_kick(kvm, type);
	return 0;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}

int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}

static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}

static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
}

static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}
out:
	spin_unlock(&fi->lock);
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ? ret : n;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}
static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
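/*
 * Illustrative userspace sketch (assumed names "flic_fd" and
 * "indicator_guest_addr"): mapping an indicator page through the
 * KVM_DEV_FLIC_ADAPTER_MODIFY group. The struct kvm_s390_io_adapter_req
 * selects adapter and operation; for KVM_S390_IO_ADAPTER_MAP, req.addr is
 * the guest address whose page kvm_s390_adapter_map() pins above.
 *
 *	struct kvm_s390_io_adapter_req req = {
 *		.id = 0,
 *		.type = KVM_S390_IO_ADAPTER_MAP,
 *		.addr = indicator_guest_addr,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ADAPTER_MODIFY,
 *		.addr = (__u64)(unsigned long)&req,
 *	};
 *
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */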
static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
	u32 schid;

	if (attr->attr != sizeof(schid))
		return -EINVAL;
	if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
		return -EFAULT;
	kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
	/*
	 * If userspace is conforming to the architecture, we can have at most
	 * one pending I/O interrupt per subchannel, so this is effectively a
	 * clear all.
	 */
	return 0;
}
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		r = clear_io_irq(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}
static int flic_has_attr(struct kvm_device *dev,
			 struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
	case KVM_DEV_FLIC_ENQUEUE:
	case KVM_DEV_FLIC_CLEAR_IRQS:
	case KVM_DEV_FLIC_APF_ENABLE:
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		return 0;
	}
	return -ENXIO;
}
static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}
static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
}
/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.has_attr = flic_has_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};
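/*
 * Illustrative userspace sketch (assumed fd names "vm_fd"/"flic_fd"): the
 * FLIC is instantiated once per VM via the KVM_CREATE_DEVICE ioctl; the fd it
 * returns is the one used for all the KVM_SET_DEVICE_ATTR calls shown in the
 * sketches above.
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_FLIC,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0)
 *		flic_fd = cd.fd;
 */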
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}
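/*
 * Worked example for get_ind_bit(): with the indicator byte at page offset 3
 * and bit_nr 2, bit = 2 + 3 * 8 = 26. Without swap, 26 is used directly with
 * the host's little-endian bitops. With swap and BITS_PER_LONG == 64, the
 * number is reflected within its 64-bit word, 26 ^ 63 = 37, yielding the
 * MSB-first bit position the guest expects.
 */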
static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}
static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}
/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}
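/*
 * Note on the interruption parameter built above (my reading of the code,
 * not an authoritative statement of the architecture): parm64 carries the
 * I/O interruption word for an adapter interrupt, with the top bit
 * (0x80000000) marking it as adapter-initiated and the adapter's ISC shifted
 * in below it. For example, ISC 3 yields (3 << 27) | 0x80000000 == 0x98000000.
 */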
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
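/*
 * Illustrative userspace sketch (assumed names "vm_fd", "ind_guest_addr",
 * "summary_guest_addr"): installing an adapter route so that a later irqfd
 * trigger for the GSI lands in set_adapter_int(). The route is installed
 * with the standard KVM_SET_GSI_ROUTING ioctl on the VM fd.
 *
 *	struct kvm_irq_routing *r;
 *
 *	r = calloc(1, sizeof(*r) + sizeof(r->entries[0]));
 *	r->nr = 1;
 *	r->entries[0].gsi = 0;
 *	r->entries[0].type = KVM_IRQ_ROUTING_S390_ADAPTER;
 *	r->entries[0].u.adapter.ind_addr = ind_guest_addr;
 *	r->entries[0].u.adapter.summary_addr = summary_guest_addr;
 *	r->entries[0].u.adapter.ind_offset = 0;
 *	r->entries[0].u.adapter.summary_offset = 7;
 *	r->entries[0].u.adapter.adapter_id = 0;
 *	ioctl(vm_fd, KVM_SET_GSI_ROUTING, r);
 */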
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}
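/*
 * Illustrative userspace sketch (assumed names "vcpu_fd", "irqs", "n"): this
 * path is reached via the KVM_S390_SET_IRQ_STATE vcpu ioctl, typically on
 * the destination side of migration before the vcpu has run. buf points at
 * an array of struct kvm_s390_irq, len is its size in bytes.
 *
 *	struct kvm_s390_irq_state irq_state = {
 *		.buf = (__u64)(unsigned long)irqs,
 *		.len = n * sizeof(struct kvm_s390_irq),
 *	};
 *
 *	ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &irq_state);
 */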
static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}