// SPDX-License-Identifier: GPL-2.0
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
		return 0;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}

static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	return 0;
}

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	int rc, expect;

	if (!kvm_s390_use_sca_entries())
		return;
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect); /* cannot clear? */
}
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	const u64 ckc = vcpu->arch.sie_block->ckc;

	if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
		if ((s64)ckc >= (s64)now)
			return 0;
	} else if (ckc >= now) {
		return 0;
	}
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}
static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u32 isc_to_int_word(u8 isc)
{
	return ((u32)isc << 27) | 0x80000000;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}

/*
 * To use atomic bitmap functions, we have to provide a bitmap address
 * that is u64 aligned. However, the ipm might be u32 aligned.
 * Therefore, we logically start the bitmap at the very beginning of the
 * struct and fixup the bit number.
 */
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)

static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
	return READ_ONCE(gisa->ipm);
}

static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs |
	       vcpu->arch.local_int.pending_irqs;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	return pending_irqs_no_gisa(vcpu) |
	       kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
}

static inline int isc_to_irq_type(unsigned long isc)
{
	return IRQ_PEND_IO_ISC_0 - isc;
}

static inline int irq_type_to_isc(unsigned long irq_type)
{
	return IRQ_PEND_IO_ISC_0 - irq_type;
}
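
/*
 * Worked example for the conversion helpers above: for isc = 3,
 * isc_to_isc_bits(3) = (0x80 >> 3) << 24 = 0x10000000 (the CR6 mask bit),
 * isc_to_int_word(3) = (3 << 27) | 0x80000000 = 0x98000000, and
 * int_word_to_isc(0x98000000) = 3, so the two I/O-int-word helpers are
 * exact inverses of each other.
 */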
static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (isc_to_irq_type(i)));

	return active_mask;
}

static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	/*
	 * Check both floating and local interrupt's cr14 because
	 * bit IRQ_PEND_MCHK_REP could be set in both cases.
	 */
	if (!(vcpu->arch.sie_block->gcr[14] &
	      (vcpu->kvm->arch.float_int.mchk.cr14 |
	       vcpu->arch.local_int.irq.mchk.cr14)))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}
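
/*
 * Example for the ISC filtering in deliverable_irqs()/disable_iscs():
 * an I/O interrupt pending on ISC 5 occupies bit isc_to_irq_type(5) =
 * IRQ_PEND_IO_ISC_0 - 5 in the pending mask, and it stays deliverable
 * only while CR6 has isc_to_isc_bits(5) = 0x04000000 set.
 */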
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
				      CPUSTAT_STOP_INT);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}
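
/*
 * Note on the indicator scheme above: if an interrupt of a class is
 * pending but currently masked by the guest PSW, an interception is
 * requested via the CPUSTAT_*_INT cpuflags (or ICTL_LPSW for machine
 * checks); if the class is only masked at the control-register level,
 * loads of the relevant control register are intercepted instead
 * (lctl), so KVM notices as soon as the guest opens up.
 */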
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}
static int __write_machine_check(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mchk_info *mchk)
{
	unsigned long ext_sa_addr;
	unsigned long lc;
	freg_t fprs[NUM_FPRS];
	union mci mci;
	int rc;

	mci.val = mchk->mcic;
	/* take care of lazy register loading */
	save_fpu_regs();
	save_access_regs(vcpu->run->s.regs.acrs);
	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
		save_gs_cb(current->thread.gs_cb);

	/* Extended save area */
	rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
			   sizeof(unsigned long));
	/* Only bits 0 through 63-LC are used for address formation */
	lc = ext_sa_addr & MCESA_LC_MASK;
	if (test_kvm_facility(vcpu->kvm, 133)) {
		switch (lc) {
		case 0:
		case 10:
			ext_sa_addr &= ~0x3ffUL;
			break;
		case 11:
			ext_sa_addr &= ~0x7ffUL;
			break;
		case 12:
			ext_sa_addr &= ~0xfffUL;
			break;
		default:
			ext_sa_addr = 0;
			break;
		}
	} else {
		ext_sa_addr &= ~0x3ffUL;
	}

	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
				    512))
			mci.vr = 0;
	} else {
		mci.vr = 0;
	}
	if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
	    && (lc == 11 || lc == 12)) {
		if (write_guest_abs(vcpu, ext_sa_addr + 1024,
				    &vcpu->run->s.regs.gscb, 32))
			mci.gs = 0;
	} else {
		mci.gs = 0;
	}

	/* General interruption information */
	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

	/* Register-save areas */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
	} else {
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
			     vcpu->run->s.regs.gprs, 128);
	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
			   (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
			   (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
			     &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
			     &vcpu->arch.sie_block->gcr, 128);

	/* Extended interruption information */
	rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
			   (u32 __user *) __LC_EXT_DAMAGE_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
			     sizeof(mchk->fixed_logout));
	return rc ? -EFAULT : 0;
}
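
/*
 * The ext_sa_addr alignment above follows the list-directed storage
 * size encoded in MCESAD: e.g. lc == 11 implies a 2K save area (mask
 * ~0x7ffUL) and lc == 12 a 4K one (mask ~0xfffUL); without facility 133
 * a fixed 1K area (mask ~0x3ffUL) is assumed.
 */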
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);
		rc = __write_machine_check(vcpu, &mchk);
	}
	return rc;
}
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}
static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int cpu_addr;
	int rc;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
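
/*
 * Example for the ILC handling above: ilen is the instruction length
 * in bytes (2, 4 or 6), i.e. always twice the architectural ILC value
 * (1, 2 or 3), so storing ilen unmodified already places the ILC in
 * the bit positions the PGM-ILC field expects.
 */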
static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}
static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
	int rc;

	rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw,
			     sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	struct kvm_s390_io_info io;
	u32 isc;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc = irq_type_to_isc(irq_type);
	isc_list = &fi->lists[isc];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
		else
			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
				   inti->io.subchannel_id >> 8,
				   inti->io.subchannel_id >> 1 & 0x3,
				   inti->io.subchannel_nr);

		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
						inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
						inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = __do_deliver_io(vcpu, &(inti->io));
		kfree(inti);
		goto out;
	}

	if (vcpu->kvm->arch.gisa &&
	    kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) {
		/*
		 * in case an adapter interrupt was not delivered
		 * in SIE context KVM will handle the delivery
		 */
		VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
		memset(&io, 0, sizeof(io));
		io.io_int_word = isc_to_int_word(isc);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
			KVM_S390_INT_IO(1, 0, 0, 0),
			((__u32)io.subchannel_id << 16) |
			io.subchannel_nr,
			((__u64)io.io_int_parm << 32) |
			io.io_int_word);
		rc = __do_deliver_io(vcpu, &io);
	}
out:
	return rc;
}
/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	const u64 ckc = vcpu->arch.sie_block->ckc;
	u64 cputm, sltime = 0;

	if (ckc_interrupts_enabled(vcpu)) {
		if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
			if ((s64)now < (s64)ckc)
				sltime = tod_to_ns((s64)ckc - (s64)now);
		} else if (now < ckc) {
			sltime = tod_to_ns(ckc - now);
		}
		/* already expired */
		if (!sltime)
			return 0;
		if (cpu_timer_interrupts_enabled(vcpu)) {
			cputm = kvm_s390_get_cpu_timer(vcpu);
			/* already expired? */
			if (cputm >> 63)
				return 0;
			return min(sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);
		/* already expired? */
		if (sltime >> 63)
			return 0;
	}
	return sltime;
}
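
/*
 * Example for the signed comparison above: with the signed
 * clock-comparator control set in CR0, now = 0xfffffffffffffffe
 * ((s64)-2) and ckc = 1 still yield the positive difference
 * (s64)ckc - (s64)now = 3, so a wait that spans a TOD clock wrap does
 * not underflow the computed sleep time.
 */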
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu) &&
	    !cpu_timer_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	sltime = __calculate_sltime(vcpu);
	if (!sltime)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	/*
	 * We cannot move this into the if, as the CPU might be already
	 * in kvm_vcpu_block without having the waitqueue set (polling)
	 */
	vcpu->valid_wakeup = true;
	/*
	 * This is mostly to document, that the read in swait_active could
	 * be moved before other stores, leading to subtle races.
	 * All current users do not store or use an atomic like update
	 */
	smp_mb__after_atomic();
	if (swait_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		swake_up(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
		return;
	}
	/*
	 * The VCPU might not be sleeping but is executing the VSIE. Let's
	 * kick it, so it leaves the SIE to process the request.
	 */
	kvm_s390_vsie_kick(vcpu);
}
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	sltime = __calculate_sltime(vcpu);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	sca_clear_ext_call(vcpu);
}
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the reverse order of interrupt priority */
		irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
		switch (irq_type) {
		case IRQ_PEND_IO_ISC_0:
		case IRQ_PEND_IO_ISC_1:
		case IRQ_PEND_IO_ISC_2:
		case IRQ_PEND_IO_ISC_3:
		case IRQ_PEND_IO_ISC_4:
		case IRQ_PEND_IO_ISC_5:
		case IRQ_PEND_IO_ISC_6:
		case IRQ_PEND_IO_ISC_7:
			rc = __deliver_io(vcpu, irq_type);
			break;
		case IRQ_PEND_MCHK_EX:
		case IRQ_PEND_MCHK_REP:
			rc = __deliver_machine_check(vcpu);
			break;
		case IRQ_PEND_PROG:
			rc = __deliver_prog(vcpu);
			break;
		case IRQ_PEND_EXT_EMERGENCY:
			rc = __deliver_emergency_signal(vcpu);
			break;
		case IRQ_PEND_EXT_EXTERNAL:
			rc = __deliver_external_call(vcpu);
			break;
		case IRQ_PEND_EXT_CLOCK_COMP:
			rc = __deliver_ckc(vcpu);
			break;
		case IRQ_PEND_EXT_CPU_TIMER:
			rc = __deliver_cpu_timer(vcpu);
			break;
		case IRQ_PEND_RESTART:
			rc = __deliver_restart(vcpu);
			break;
		case IRQ_PEND_SET_PREFIX:
			rc = __deliver_set_prefix(vcpu);
			break;
		case IRQ_PEND_PFAULT_INIT:
			rc = __deliver_pfault_init(vcpu);
			break;
		case IRQ_PEND_EXT_SERVICE:
			rc = __deliver_service(vcpu);
			break;
		case IRQ_PEND_PFAULT_DONE:
			rc = __deliver_pfault_done(vcpu);
			break;
		case IRQ_PEND_VIRTIO:
			rc = __deliver_virtio(vcpu);
			break;
		default:
			WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
			clear_bit(irq_type, &li->pending_irqs);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}
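
/*
 * The IRQ_PEND_* bits are declared in reverse order of interrupt
 * priority, so find_last_bit() in the loop above always selects the
 * highest-priority deliverable interrupt first; e.g. a pending exigent
 * machine check (IRQ_PEND_MCHK_EX) is delivered before any pending I/O
 * interrupt.
 */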
static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
		/* auto detection if no valid ILC was given */
		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
	}

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		li->irq.pgm = irq->u.pgm;
	}
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif)
		return sca_inject_ext_call(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}
#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  u64 isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}

static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
						      u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}

static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
{
	unsigned long active_mask;
	int isc;

	if (schid)
		goto out;
	if (!kvm->arch.gisa)
		goto out;

	active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32;
	while (active_mask) {
		isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
		if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc))
			return isc;
		clear_bit_inv(isc, &active_mask);
	}
out:
	return -EINVAL;
}

/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 * Take into account the interrupts pending in the interrupt list and in GISA.
 *
 * Note that for a guest that does not enable I/O interrupts
 * but relies on TPI, a flood of classic interrupts may starve
 * out adapter interrupts on the same isc. Linux does not do
 * that, and it is possible to work around the issue by configuring
 * different iscs for classic and adapter interrupts in the guest,
 * but we may want to revisit this in the future.
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti, *tmp_inti;
	int isc;

	inti = get_top_io_int(kvm, isc_mask, schid);

	isc = get_top_gisa_isc(kvm, isc_mask, schid);
	if (isc < 0)
		/* no AI in GISA */
		goto out;

	if (!inti)
		/* AI in GISA but no classical IO int */
		goto gisa_out;

	/* both types of interrupts present */
	if (int_word_to_isc(inti->io.io_int_word) <= isc) {
		/* classical IO int with higher priority */
		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
		goto out;
	}
gisa_out:
	tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (tmp_inti) {
		tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
		tmp_inti->io.io_int_word = isc_to_int_word(isc);
		if (inti)
			kvm_s390_reinject_io_int(kvm, inti);
		inti = tmp_inti;
	} else
		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
out:
	return inti;
}
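
/*
 * kvm_s390_get_io_int() therefore merges the two pending-I/O sources:
 * a classic list entry is preferred when its ISC is at least as high
 * in priority (numerically <=) as the best GISA ISC; otherwise the
 * GISA adapter interrupt is turned into a freshly allocated
 * kvm_s390_interrupt_info and the classic entry, if any, is re-queued.
 */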
#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts after another without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * and we will remove this logic.
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_virtio(struct kvm *kvm,
			   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int __inject_pfault_done(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
	    (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
			       struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	isc = int_word_to_isc(inti->io.io_int_word);

	if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) {
		VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
		kfree(inti);
		return 0;
	}

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	if (inti->type & KVM_S390_INT_IO_AI_MASK)
		VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
	else
		VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
			 inti->io.subchannel_id >> 8,
			 inti->io.subchannel_id >> 1 & 0x3,
			 inti->io.subchannel_nr);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = fi->next_rr_cpu;
			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	switch (type) {
	case KVM_S390_MCHK:
		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_INT_IO_MIN ... KVM_S390_INT_IO_MAX:
		if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa))
			kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
		break;
	default:
		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
		break;
	}
	kvm_s390_vcpu_wakeup(dst_vcpu);
}
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	u64 type = READ_ONCE(inti->type);
	int rc;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN ... KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	__floating_irq_kick(kvm, type);
	return 0;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN ... KVM_S390_INT_IO_MAX:
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);
	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}

int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}
static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN ... KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}

static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN ... KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk = inti->mchk;
		break;
	}
}
void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
	kvm_s390_gisa_clear(kvm);
}

static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	if (kvm->arch.gisa &&
	    kvm_s390_gisa_get_ipm(kvm->arch.gisa)) {
		for (i = 0; i <= MAX_ISC; i++) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out_nolock;
			}
			if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) {
				irq = (struct kvm_s390_irq *) &buf[n];
				irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
				irq->u.io.io_int_word = isc_to_int_word(i);
				n++;
			}
		}
	}
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}
out:
	spin_unlock(&fi->lock);
out_nolock:
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ? ret : n;
}
static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_all ais;

	if (attr->attr < sizeof(ais))
		return -EINVAL;

	if (!test_kvm_facility(kvm, 72))
		return -ENOTSUPP;

	mutex_lock(&fi->ais_lock);
	ais.simm = fi->simm;
	ais.nimm = fi->nimm;
	mutex_unlock(&fi->ais_lock);

	if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
		return -EFAULT;

	return 0;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
					  attr->attr);
		break;
	case KVM_DEV_FLIC_AISM_ALL:
		r = flic_ais_mode_get_all(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

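/*
 * Fetch one struct kvm_s390_irq from userspace and convert it into the
 * in-kernel kvm_s390_interrupt_info representation; only the payload
 * union member that matches the interrupt type is copied.
 */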
static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

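/*
 * Inject a userspace-provided array of floating interrupts, one
 * struct kvm_s390_irq at a time; stops at the first injection error.
 */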
static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	adapter->suppressible = (adapter_info.flags) &
				KVM_S390_ADAPTER_SUPPRESSIBLE;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

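/*
 * Mask or unmask a maskable adapter. Returns the previous masked
 * state, or -EINVAL if the adapter does not exist or is not maskable.
 */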
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

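/*
 * Pin a guest indicator page for an adapter: translate the guest
 * address via the gmap, take a reference on the backing page, and
 * queue it on the adapter's map list (bounded by MAX_S390_ADAPTER_MAPS).
 */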
static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
	u32 schid;

	if (attr->attr != sizeof(schid))
		return -EINVAL;
	if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
		return -EFAULT;
	if (!schid)
		return -EINVAL;
	kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
	/*
	 * If userspace is conforming to the architecture, we can have at most
	 * one pending I/O interrupt per subchannel, so this is effectively a
	 * clear all.
	 */
	return 0;
}

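/*
 * Switch the adapter-interruption-suppression mode of one ISC between
 * all-interruptions mode and single-interruption mode by updating the
 * simm/nimm bit masks (requires facility 72).
 */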
static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_req req;
	int ret = 0;

	if (!test_kvm_facility(kvm, 72))
		return -ENOTSUPP;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	if (req.isc > MAX_ISC)
		return -EINVAL;

	trace_kvm_s390_modify_ais_mode(req.isc,
				       (fi->simm & AIS_MODE_MASK(req.isc)) ?
				       (fi->nimm & AIS_MODE_MASK(req.isc)) ?
				       2 : KVM_S390_AIS_MODE_SINGLE :
				       KVM_S390_AIS_MODE_ALL, req.mode);

	mutex_lock(&fi->ais_lock);
	switch (req.mode) {
	case KVM_S390_AIS_MODE_ALL:
		fi->simm &= ~AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	case KVM_S390_AIS_MODE_SINGLE:
		fi->simm |= AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&fi->ais_lock);

	return ret;
}

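/*
 * Inject an adapter interrupt for the adapter's ISC. For suppressible
 * adapters in single-interruption mode, further interrupts on this ISC
 * are suppressed (nimm bit set) until the mode is reset via
 * KVM_DEV_FLIC_AISM.
 */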
static int kvm_s390_inject_airq(struct kvm *kvm,
				struct s390_io_adapter *adapter)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_IO(1, 0, 0, 0),
		.parm = 0,
		.parm64 = isc_to_int_word(adapter->isc),
	};
	int ret = 0;

	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
		return kvm_s390_inject_vm(kvm, &s390int);

	mutex_lock(&fi->ais_lock);
	if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
		trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
		goto out;
	}

	ret = kvm_s390_inject_vm(kvm, &s390int);
	if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
		fi->nimm |= AIS_MODE_MASK(adapter->isc);
		trace_kvm_s390_modify_ais_mode(adapter->isc,
					       KVM_S390_AIS_MODE_SINGLE, 2);
	}
out:
	mutex_unlock(&fi->ais_lock);
	return ret;
}

static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	unsigned int id = attr->attr;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter)
		return -EINVAL;

	return kvm_s390_inject_airq(kvm, adapter);
}

static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_all ais;

	if (!test_kvm_facility(kvm, 72))
		return -ENOTSUPP;

	if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
		return -EFAULT;

	mutex_lock(&fi->ais_lock);
	fi->simm = ais.simm;
	fi->nimm = ais.nimm;
	mutex_unlock(&fi->ais_lock);

	return 0;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		r = clear_io_irq(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AISM:
		r = modify_ais_mode(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AIRQ_INJECT:
		r = flic_inject_airq(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AISM_ALL:
		r = flic_ais_mode_set_all(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_has_attr(struct kvm_device *dev,
			 struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
	case KVM_DEV_FLIC_ENQUEUE:
	case KVM_DEV_FLIC_CLEAR_IRQS:
	case KVM_DEV_FLIC_APF_ENABLE:
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
	case KVM_DEV_FLIC_AISM:
	case KVM_DEV_FLIC_AIRQ_INJECT:
	case KVM_DEV_FLIC_AISM_ALL:
		return 0;
	}
	return -ENXIO;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.has_attr = flic_has_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};

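/*
 * A minimal sketch of how userspace drives the flic through the generic
 * KVM device API (illustrative only; "vm_fd" is assumed to be an
 * already-created VM file descriptor, error handling omitted):
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	struct kvm_device_attr attr = { .group = KVM_DEV_FLIC_CLEAR_IRQS };
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */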
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

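/*
 * Set the per-interrupt indicator bit and the summary indicator bit in
 * the pinned guest pages. Returns 1 if the summary bit was newly set
 * (an interrupt should be injected), 0 if it was already set (the
 * interrupt is coalesced), and -1 if a mapping is missing.
 */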
static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		ret = kvm_s390_inject_airq(kvm, adapter);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

/*
 * Inject the machine check to the guest.
 */
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info)
{
	struct kvm_s390_interrupt_info inti;
	struct kvm_s390_irq irq;
	struct kvm_s390_mchk_info *mchk;
	union mci mci;
	__u64 cr14 = 0; /* upper bits are not used */
	int rc;

	mci.val = mcck_info->mcic;
	if (mci.sr)
		cr14 |= CR14_RECOVERY_SUBMASK;
	if (mci.dg)
		cr14 |= CR14_DEGRADATION_SUBMASK;
	if (mci.w)
		cr14 |= CR14_WARNING_SUBMASK;

	mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
	mchk->cr14 = cr14;
	mchk->mcic = mcck_info->mcic;
	mchk->ext_damage_code = mcck_info->ext_damage_code;
	mchk->failing_storage_address = mcck_info->failing_storage_address;
	if (mci.ck) {
		/* Inject the floating machine check */
		inti.type = KVM_S390_MCHK;
		rc = __inject_vm(vcpu->kvm, &inti);
	} else {
		/* Inject the machine check to specified vcpu */
		irq.type = KVM_S390_MCHK;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
	}
	WARN_ON_ONCE(rc);
}

int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}

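/*
 * Restore the local interrupt state of a vcpu from a userspace buffer,
 * e.g. on the migration target. Fails with -EBUSY if interrupts are
 * already pending.
 */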
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}

static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}

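/*
 * Store the local interrupt state of a vcpu into a userspace buffer:
 * first all ordinary pending local interrupts, then one emergency
 * signal per requesting cpu, then a pending SIGP external call read
 * from the SCA. Returns the number of bytes stored or -ENOBUFS.
 */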
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}

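/*
 * The guest interruption state area (GISA) lets the machine deliver
 * I/O interrupts to the guest without a host round trip. Clearing it
 * also resets next_alert to point back at the GISA itself, which marks
 * it as not being on an alert list.
 */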
void kvm_s390_gisa_clear(struct kvm *kvm)
{
	if (kvm->arch.gisa) {
		memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa));
		kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa;
		VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa);
	}
}

void kvm_s390_gisa_init(struct kvm *kvm)
{
	if (css_general_characteristics.aiv) {
		kvm->arch.gisa = &kvm->arch.sie_page2->gisa;
		VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa);
		kvm_s390_gisa_clear(kvm);
	}
}

void kvm_s390_gisa_destroy(struct kvm *kvm)
{
	if (!kvm->arch.gisa)
		return;
	kvm->arch.gisa = NULL;
}