// SPDX-License-Identifier: GPL-2.0
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
		return 0;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}
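/*
 * Inject an external call by claiming the SIGP control block of the
 * target VCPU with a single cmpxchg: the swap only succeeds if no call
 * was pending yet (old "c" bit clear), which lets concurrent injectors
 * serialize without holding a lock across the update.
 */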
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	return 0;
}
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	int rc, expect;

	if (!kvm_s390_use_sca_entries())
		return;
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect); /* cannot clear? */
}
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	const u64 ckc = vcpu->arch.sie_block->ckc;

	if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
		if ((s64)ckc >= (s64)now)
			return 0;
	} else if (ckc >= now) {
		return 0;
	}
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u32 isc_to_int_word(u8 isc)
{
	return ((u32)isc << 27) | 0x80000000;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}
/*
 * To use atomic bitmap functions, we have to provide a bitmap address
 * that is u64 aligned. However, the ipm might be u32 aligned.
 * Therefore, we logically start the bitmap at the very beginning of the
 * struct and fixup the bit number.
 */
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)

static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
	return READ_ONCE(gisa->ipm);
}

static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs |
	       vcpu->arch.local_int.pending_irqs;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	return pending_irqs_no_gisa(vcpu) |
	       kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
}

static inline int isc_to_irq_type(unsigned long isc)
{
	return IRQ_PEND_IO_ISC_0 - isc;
}

static inline int irq_type_to_isc(unsigned long irq_type)
{
	return IRQ_PEND_IO_ISC_0 - irq_type;
}
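/*
 * deliverable_irqs() narrows the summary of pending interrupts down to
 * those the guest can currently take: the PSW system mask gates whole
 * classes (external, I/O, machine check), while CR0/CR6/CR14 submask
 * bits gate individual sources within a class.
 */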
static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (isc_to_irq_type(i)));

	return active_mask;
}

static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	/*
	 * Check both floating and local interrupt's cr14 because
	 * bit IRQ_PEND_MCHK_REP could be set in both cases.
	 */
	if (!(vcpu->arch.sie_block->gcr[14] &
	   (vcpu->kvm->arch.float_int.mchk.cr14 |
	   vcpu->arch.local_int.irq.mchk.cr14)))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
				      CPUSTAT_STOP_INT);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}
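/*
 * The __deliver_* helpers below all follow the architected interrupt
 * action: store the interruption code/parameters and the old PSW into
 * guest lowcore, then load the new PSW for that interrupt class. Any
 * failed lowcore access is reported to the caller as -EFAULT.
 */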
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	vcpu->stat.deliver_cputm++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	vcpu->stat.deliver_ckc++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}
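/*
 * Writing a machine check first saves volatile register state into the
 * guest's save areas (and, depending on facilities 129/133, the vector
 * and guarded-storage extended save areas), then stores the mcic and
 * swaps PSWs. Validity bits in mci are cleared for any state that
 * could not be saved, so the guest sees a consistent indication.
 */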
static int __write_machine_check(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mchk_info *mchk)
{
	unsigned long ext_sa_addr;
	unsigned long lc;
	freg_t fprs[NUM_FPRS];
	union mci mci;
	int rc;

	mci.val = mchk->mcic;
	/* take care of lazy register loading */
	save_fpu_regs();
	save_access_regs(vcpu->run->s.regs.acrs);
	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
		save_gs_cb(current->thread.gs_cb);

	/* Extended save area */
	rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
			   sizeof(unsigned long));
	/* Only bits 0 through 63-LC are used for address formation */
	lc = ext_sa_addr & MCESA_LC_MASK;
	if (test_kvm_facility(vcpu->kvm, 133)) {
		switch (lc) {
		case 0:
		case 10:
			ext_sa_addr &= ~0x3ffUL;
			break;
		case 11:
			ext_sa_addr &= ~0x7ffUL;
			break;
		case 12:
			ext_sa_addr &= ~0xfffUL;
			break;
		default:
			ext_sa_addr = 0;
			break;
		}
	} else {
		ext_sa_addr &= ~0x3ffUL;
	}

	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
				    512))
			mci.vr = 0;
	} else {
		mci.vr = 0;
	}
	if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
	    && (lc == 11 || lc == 12)) {
		if (write_guest_abs(vcpu, ext_sa_addr + 1024,
				    &vcpu->run->s.regs.gscb, 32))
			mci.gs = 0;
	} else {
		mci.gs = 0;
	}

	/* General interruption information */
	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

	/* Register-save areas */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
	} else {
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
			     vcpu->run->s.regs.gprs, 128);
	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
			   (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
			   (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
			     &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
			     &vcpu->arch.sie_block->gcr, 128);

	/* Extended interruption information */
	rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
			   (u32 __user *) __LC_EXT_DAMAGE_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
			     sizeof(mchk->fixed_logout));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);
		vcpu->stat.deliver_machine_check++;
		rc = __write_machine_check(vcpu, &mchk);
	}
	return rc;
}
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}
static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int cpu_addr;
	int rc;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				inti->ext.ext_params,
				inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}
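/*
 * __do_deliver_io() copies one I/O interruption (subchannel id/nr,
 * interruption parameter and word) into guest lowcore and performs the
 * I/O old/new PSW swap; it is shared by the list-based and GISA-based
 * delivery paths below.
 */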
static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
	int rc;

	rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw,
			     sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	struct kvm_s390_io_info io;
	u32 isc;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc = irq_type_to_isc(irq_type);
	isc_list = &fi->lists[isc];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
		else
			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
				   inti->io.subchannel_id >> 8,
				   inti->io.subchannel_id >> 1 & 0x3,
				   inti->io.subchannel_nr);

		vcpu->stat.deliver_io++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
				inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
				inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = __do_deliver_io(vcpu, &(inti->io));
		kfree(inti);
		goto out;
	}

	if (vcpu->kvm->arch.gisa &&
	    kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) {
		/*
		 * in case an adapter interrupt was not delivered
		 * in SIE context KVM will handle the delivery
		 */
		VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
		memset(&io, 0, sizeof(io));
		io.io_int_word = isc_to_int_word(isc);
		vcpu->stat.deliver_io++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
			KVM_S390_INT_IO(1, 0, 0, 0),
			((__u32)io.subchannel_id << 16) |
			io.subchannel_nr,
			((__u64)io.io_int_parm << 32) |
			io.io_int_word);
		rc = __do_deliver_io(vcpu, &io);
	}
out:
	return rc;
}
/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}
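/*
 * Compute how long the VCPU may sleep before the next timer-related
 * wakeup: the minimum of the time until the clock comparator fires
 * (honoring the signed/unsigned comparison mode selected in CR0) and
 * the remaining CPU timer, considering only sources that are enabled.
 */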
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	const u64 ckc = vcpu->arch.sie_block->ckc;
	u64 cputm, sltime = 0;

	if (ckc_interrupts_enabled(vcpu)) {
		if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
			if ((s64)now < (s64)ckc)
				sltime = tod_to_ns((s64)ckc - (s64)now);
		} else if (now < ckc) {
			sltime = tod_to_ns(ckc - now);
		}
		/* already expired */
		if (!sltime)
			return 0;
		if (cpu_timer_interrupts_enabled(vcpu)) {
			cputm = kvm_s390_get_cpu_timer(vcpu);
			/* already expired? */
			if (cputm >> 63)
				return 0;
			return min(sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);
		/* already expired? */
		if (sltime >> 63)
			return 0;
	}
	return sltime;
}
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu) &&
	    !cpu_timer_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	sltime = __calculate_sltime(vcpu);
	if (!sltime)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	/*
	 * We cannot move this into the if, as the CPU might be already
	 * in kvm_vcpu_block without having the waitqueue set (polling)
	 */
	vcpu->valid_wakeup = true;
	/*
	 * This is mostly to document, that the read in swait_active could
	 * be moved before other stores, leading to subtle races.
	 * All current users do not store or use an atomic like update
	 */
	smp_mb__after_atomic();
	if (swait_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		swake_up_one(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
		return;
	}
	/*
	 * The VCPU might not be sleeping but is executing the VSIE. Let's
	 * kick it, so it leaves the SIE to process the request.
	 */
	kvm_s390_vsie_kick(vcpu);
}
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	sltime = __calculate_sltime(vcpu);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	sca_clear_ext_call(vcpu);
}
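/*
 * Delivery loop: deliverable_irqs() is re-evaluated on every iteration
 * and find_last_bit() picks the highest-priority pending interrupt,
 * since the IRQ_PEND_* bits are ordered in reverse interrupt priority.
 */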
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the reverse order of interrupt priority */
		irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
		switch (irq_type) {
		case IRQ_PEND_IO_ISC_0:
		case IRQ_PEND_IO_ISC_1:
		case IRQ_PEND_IO_ISC_2:
		case IRQ_PEND_IO_ISC_3:
		case IRQ_PEND_IO_ISC_4:
		case IRQ_PEND_IO_ISC_5:
		case IRQ_PEND_IO_ISC_6:
		case IRQ_PEND_IO_ISC_7:
			rc = __deliver_io(vcpu, irq_type);
			break;
		case IRQ_PEND_MCHK_EX:
		case IRQ_PEND_MCHK_REP:
			rc = __deliver_machine_check(vcpu);
			break;
		case IRQ_PEND_PROG:
			rc = __deliver_prog(vcpu);
			break;
		case IRQ_PEND_EXT_EMERGENCY:
			rc = __deliver_emergency_signal(vcpu);
			break;
		case IRQ_PEND_EXT_EXTERNAL:
			rc = __deliver_external_call(vcpu);
			break;
		case IRQ_PEND_EXT_CLOCK_COMP:
			rc = __deliver_ckc(vcpu);
			break;
		case IRQ_PEND_EXT_CPU_TIMER:
			rc = __deliver_cpu_timer(vcpu);
			break;
		case IRQ_PEND_RESTART:
			rc = __deliver_restart(vcpu);
			break;
		case IRQ_PEND_SET_PREFIX:
			rc = __deliver_set_prefix(vcpu);
			break;
		case IRQ_PEND_PFAULT_INIT:
			rc = __deliver_pfault_init(vcpu);
			break;
		case IRQ_PEND_EXT_SERVICE:
			rc = __deliver_service(vcpu);
			break;
		case IRQ_PEND_PFAULT_DONE:
			rc = __deliver_pfault_done(vcpu);
			break;
		case IRQ_PEND_VIRTIO:
			rc = __deliver_virtio(vcpu);
			break;
		default:
			WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
			clear_bit(irq_type, &li->pending_irqs);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}
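/*
 * Program interrupt injection merges with an already pending program
 * interrupt instead of queueing: injecting a PER event only updates the
 * PER-related fields, a non-PER code only the non-PER fields, so a PER
 * condition and another program interrupt can be pending concurrently.
 */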
static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_program++;
	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
		/* auto detection if no valid ILC was given */
		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
	}

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		li->irq.pgm = irq->u.pgm;
	}

	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}
static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_pfault_init++;
	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	vcpu->stat.inject_external_call++;
	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif)
		return sca_inject_ext_call(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	vcpu->stat.inject_set_prefix++;
	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}
#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	vcpu->stat.inject_stop_signal++;
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	return 0;
}
static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_restart++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}
static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_emergency_signal++;
	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	vcpu->stat.inject_mchk++;
	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}
static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_ckc++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_cputm++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
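/*
 * Dequeue one queued (classic) I/O interrupt on the given ISC. A
 * non-zero schid restricts the search to a specific subchannel, which
 * the instruction emulation paths use to pick out a particular
 * pending interruption.
 */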
static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  int isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}
static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
						      u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}
static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
{
	unsigned long active_mask;
	int isc;

	if (schid)
		goto out;
	if (!kvm->arch.gisa)
		goto out;

	active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32;
	while (active_mask) {
		isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
		if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc))
			return isc;
		clear_bit_inv(isc, &active_mask);
	}
out:
	return -EINVAL;
}
/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 * Take into account the interrupts pending in the interrupt list and in GISA.
 *
 * Note that for a guest that does not enable I/O interrupts
 * but relies on TPI, a flood of classic interrupts may starve
 * out adapter interrupts on the same isc. Linux does not do
 * that, and it is possible to work around the issue by configuring
 * different iscs for classic and adapter interrupts in the guest,
 * but we may want to revisit this in the future.
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti, *tmp_inti;
	int isc;

	inti = get_top_io_int(kvm, isc_mask, schid);

	isc = get_top_gisa_isc(kvm, isc_mask, schid);
	if (isc < 0)
		/* no AI in GISA */
		goto out;

	if (!inti)
		/* AI in GISA but no classical IO int */
		goto gisa_out;

	/* both types of interrupts present */
	if (int_word_to_isc(inti->io.io_int_word) <= isc) {
		/* classical IO int with higher priority */
		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
		goto out;
	}
gisa_out:
	tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (tmp_inti) {
		tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
		tmp_inti->io.io_int_word = isc_to_int_word(isc);
		if (inti)
			kvm_s390_reinject_io_int(kvm, inti);
		inti = tmp_inti;
	} else
		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
out:
	return inti;
}
#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_service_signal++;
	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts after another without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * of servc requests
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}
static int __inject_virtio(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_virtio++;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
static int __inject_pfault_done(struct kvm *kvm,
				 struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_pfault_done++;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_float_mchk++;
	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}
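/*
 * Adapter interrupts can be injected directly into the GISA ipm, where
 * the SIE engine may deliver them without exiting to KVM; everything
 * else goes through the per-ISC lists, bounded by the floating
 * interrupt counter.
 */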
static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	kvm->stat.inject_io++;
	isc = int_word_to_isc(inti->io.io_int_word);

	if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) {
		VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
		kfree(inti);
		return 0;
	}

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	if (inti->type & KVM_S390_INT_IO_AI_MASK)
		VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
	else
		VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
			 inti->io.subchannel_id >> 8,
			 inti->io.subchannel_id >> 1 & 0x3,
			 inti->io.subchannel_nr);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = fi->next_rr_cpu;
			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	switch (type) {
	case KVM_S390_MCHK:
		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa))
			kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
		break;
	default:
		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
		break;
	}
	kvm_s390_vcpu_wakeup(dst_vcpu);
}
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	u64 type = READ_ONCE(inti->type);
	int rc;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	__floating_irq_kick(kvm, type);
	return 0;
}
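/*
 * Userspace encoding for floating interrupts: s390int->parm carries the
 * 32-bit parameter (or the subchannel id/nr for I/O), s390int->parm64
 * the 64-bit parameter (or the interruption parm/word for I/O). The
 * allocated interrupt info is freed here on any failure path.
 */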
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}
int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}
static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}
static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
		       struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}
void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
	kvm_s390_gisa_clear(kvm);
}
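/*
 * Snapshot all floating interrupts for userspace (FLIC). GISA-held
 * adapter interrupts are drained into the buffer first (note that this
 * clears them from the ipm), then the per-ISC lists, the service
 * signal and the floating machine check are copied under fi->lock.
 */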
static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	if (kvm->arch.gisa &&
	    kvm_s390_gisa_get_ipm(kvm->arch.gisa)) {
		for (i = 0; i <= MAX_ISC; i++) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out_nolock;
			}
			if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) {
				irq = (struct kvm_s390_irq *) &buf[n];
				irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
				irq->u.io.io_int_word = isc_to_int_word(i);
				n++;
			}
		}
	}
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}

out:
	spin_unlock(&fi->lock);
out_nolock:
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ? ret : n;
}
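
/*
 * For illustration only: a hedged userspace sketch of reading the
 * floating-interrupt state through the FLIC device, assuming flic_fd
 * came from KVM_CREATE_DEVICE. A too-small buffer is reported as
 * -ENOMEM, which is the cue to retry with a bigger one; on success the
 * ioctl returns the number of interrupts copied:
 *
 *	struct kvm_s390_irq irqs[256];
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_GET_ALL_IRQS,
 *		.attr = sizeof(irqs),	// buffer length in bytes
 *		.addr = (__u64)(unsigned long)irqs,
 *	};
 *	int n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
 *	// n < 0 && errno == ENOMEM: retry with a larger buffer
 */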
static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_all ais;

	if (attr->attr < sizeof(ais))
		return -EINVAL;

	if (!test_kvm_facility(kvm, 72))
		return -ENOTSUPP;

	mutex_lock(&fi->ais_lock);
	ais.simm = fi->simm;
	ais.nimm = fi->nimm;
	mutex_unlock(&fi->ais_lock);

	if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
		return -EFAULT;

	return 0;
}
static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
					  attr->attr);
		break;
	case KVM_DEV_FLIC_AISM_ALL:
		r = flic_ais_mode_get_all(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}
static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}
static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}
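
/*
 * For illustration only: enqueueing floating interrupts from userspace
 * is the mirror image of KVM_DEV_FLIC_GET_ALL_IRQS. A hedged sketch,
 * assuming the same flic_fd as above:
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_VIRTIO,
 *		.u.ext = { .ext_params = 0, .ext_params2 = 0 },
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ENQUEUE,
 *		.attr = sizeof(irq),	// must be a multiple of
 *					// sizeof(struct kvm_s390_irq)
 *		.addr = (__u64)(unsigned long)&irq,
 *	};
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */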
static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}
static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	adapter->suppressible = (adapter_info.flags) &
				KVM_S390_ADAPTER_SUPPRESSIBLE;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}
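
/*
 * For illustration only: a hedged sketch of adapter registration from
 * userspace, matching the copy_from_user() layout above:
 *
 *	struct kvm_s390_io_adapter adapter_info = {
 *		.id = 0,
 *		.isc = 3,		// interruption subclass
 *		.maskable = 1,
 *		.swap = 0,		// indicator bits not byte-swapped
 *		.flags = KVM_S390_ADAPTER_SUPPRESSIBLE,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ADAPTER_REGISTER,
 *		.addr = (__u64)(unsigned long)&adapter_info,
 *	};
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */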
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}
static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}
static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}
void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}
static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
	u32 schid;

	if (attr->attr != sizeof(schid))
		return -EINVAL;
	if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
		return -EFAULT;
	if (!schid)
		return -EINVAL;
	kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
	/*
	 * If userspace is conforming to the architecture, we can have at most
	 * one pending I/O interrupt per subchannel, so this is effectively a
	 * clear all.
	 */
	return 0;
}
static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_req req;
	int ret = 0;

	if (!test_kvm_facility(kvm, 72))
		return -ENOTSUPP;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	if (req.isc > MAX_ISC)
		return -EINVAL;

	trace_kvm_s390_modify_ais_mode(req.isc,
				       (fi->simm & AIS_MODE_MASK(req.isc)) ?
				       (fi->nimm & AIS_MODE_MASK(req.isc)) ?
				       2 : KVM_S390_AIS_MODE_SINGLE :
				       KVM_S390_AIS_MODE_ALL, req.mode);

	mutex_lock(&fi->ais_lock);
	switch (req.mode) {
	case KVM_S390_AIS_MODE_ALL:
		fi->simm &= ~AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	case KVM_S390_AIS_MODE_SINGLE:
		fi->simm |= AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&fi->ais_lock);

	return ret;
}
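
/*
 * Adapter-interruption-suppression modes, as encoded per ISC in the
 * simm/nimm bitmasks: ALL (neither bit set) delivers every adapter
 * interrupt; SINGLE (simm set, nimm clear) delivers one interrupt and
 * then drops into the suppressed state (simm and nimm both set, the
 * "2" in the trace call above), where further interrupts for that ISC
 * are swallowed until userspace rearms it through this interface.
 */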
static int kvm_s390_inject_airq(struct kvm *kvm,
				struct s390_io_adapter *adapter)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_IO(1, 0, 0, 0),
		.parm = 0,
		.parm64 = isc_to_int_word(adapter->isc),
	};
	int ret = 0;

	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
		return kvm_s390_inject_vm(kvm, &s390int);

	mutex_lock(&fi->ais_lock);
	if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
		trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
		goto out;
	}

	ret = kvm_s390_inject_vm(kvm, &s390int);
	if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
		fi->nimm |= AIS_MODE_MASK(adapter->isc);
		trace_kvm_s390_modify_ais_mode(adapter->isc,
					       KVM_S390_AIS_MODE_SINGLE, 2);
	}
out:
	mutex_unlock(&fi->ais_lock);
	return ret;
}
static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	unsigned int id = attr->attr;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter)
		return -EINVAL;

	return kvm_s390_inject_airq(kvm, adapter);
}
static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_all ais;

	if (!test_kvm_facility(kvm, 72))
		return -ENOTSUPP;

	if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
		return -EFAULT;

	mutex_lock(&fi->ais_lock);
	fi->simm = ais.simm;
	fi->nimm = ais.nimm;
	mutex_unlock(&fi->ais_lock);

	return 0;
}
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		r = clear_io_irq(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AISM:
		r = modify_ais_mode(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AIRQ_INJECT:
		r = flic_inject_airq(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AISM_ALL:
		r = flic_ais_mode_set_all(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}
static int flic_has_attr(struct kvm_device *dev,
			 struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
	case KVM_DEV_FLIC_ENQUEUE:
	case KVM_DEV_FLIC_CLEAR_IRQS:
	case KVM_DEV_FLIC_APF_ENABLE:
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
	case KVM_DEV_FLIC_AISM:
	case KVM_DEV_FLIC_AIRQ_INJECT:
	case KVM_DEV_FLIC_AISM_ALL:
		return 0;
	}
	return -ENXIO;
}
static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}
/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.has_attr = flic_has_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};
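
/*
 * For illustration only: userspace instantiates this device with
 * KVM_CREATE_DEVICE on the VM fd. A minimal hedged sketch, assuming
 * vm_fd came from KVM_CREATE_VM:
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_FLIC,
 *	};
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0)
 *		flic_fd = cd.fd;	// device fd for the attr ioctls
 */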
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}
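
/*
 * Worked example for the swap case: bit_nr 0 at a page-aligned address
 * gives bit = 0, which the XOR mirrors within the 64-bit word to
 * 0 ^ (BITS_PER_LONG - 1) = 63, and bit 63 maps back to 0. The XOR
 * thus reverses the bit numbering inside each word, converting the
 * guest's MSB-first indicator layout to the LSB-first numbering used
 * by the Linux bitops applied to these bits below.
 */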
static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}

	return NULL;
}
static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}
/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		ret = kvm_s390_inject_airq(kvm, adapter);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}
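
/*
 * For illustration only: a hedged sketch of the matching routing entry
 * that userspace installs via KVM_SET_GSI_ROUTING so that triggering
 * the GSI lands in set_adapter_int(); gsi, the two guest addresses,
 * the bit offsets and id are placeholders here:
 *
 *	struct kvm_irq_routing_entry e = {
 *		.gsi = gsi,
 *		.type = KVM_IRQ_ROUTING_S390_ADAPTER,
 *		.u.adapter = {
 *			.ind_addr = ind_guest_addr,
 *			.summary_addr = summary_guest_addr,
 *			.ind_offset = bit_nr,
 *			.summary_offset = summary_bit_nr,
 *			.adapter_id = id,
 *		},
 *	};
 */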
/*
 * Inject the machine check to the guest.
 */
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info)
{
	struct kvm_s390_interrupt_info inti;
	struct kvm_s390_irq irq;
	struct kvm_s390_mchk_info *mchk;
	union mci mci;
	__u64 cr14 = 0; /* upper bits are not used */
	int rc;

	mci.val = mcck_info->mcic;
	if (mci.sr)
		cr14 |= CR14_RECOVERY_SUBMASK;
	if (mci.dg)
		cr14 |= CR14_DEGRADATION_SUBMASK;
	if (mci.w)
		cr14 |= CR14_WARNING_SUBMASK;

	mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
	mchk->cr14 = cr14;
	mchk->mcic = mcck_info->mcic;
	mchk->ext_damage_code = mcck_info->ext_damage_code;
	mchk->failing_storage_address = mcck_info->failing_storage_address;
	if (mci.ck) {
		/* Inject the floating machine check */
		inti.type = KVM_S390_MCHK;
		rc = __inject_vm(vcpu->kvm, &inti);
	} else {
		/* Inject the machine check to specified vcpu */
		irq.type = KVM_S390_MCHK;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
	}
	WARN_ON_ONCE(rc);
}
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}
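
/*
 * Backend of the KVM_S390_SET_IRQ_STATE vcpu ioctl, used to restore
 * per-vcpu interrupt state, typically on the migration target. The
 * -EBUSY above enforces that the vcpu still has a clean slate, so the
 * restore must happen before the guest runs.
 */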
static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}
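
/*
 * Counterpart of kvm_s390_set_irq_state(), used on the migration
 * source; the return value is the number of bytes written to buf.
 * Note that a single pending emergency bit is expanded into one
 * kvm_s390_irq per signalling CPU, so a worst-case buffer must allow
 * for up to KVM_MAX_VCPUS extra entries.
 */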
void kvm_s390_gisa_clear(struct kvm *kvm)
{
	if (kvm->arch.gisa) {
		memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa));
		kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa;
		VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa);
	}
}

void kvm_s390_gisa_init(struct kvm *kvm)
{
	if (css_general_characteristics.aiv) {
		kvm->arch.gisa = &kvm->arch.sie_page2->gisa;
		VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa);
		kvm_s390_gisa_clear(kvm);
	}
}

void kvm_s390_gisa_destroy(struct kvm *kvm)
{
	if (!kvm->arch.gisa)
		return;
	kvm->arch.gisa = NULL;
}
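
/*
 * The guest interruption state area (GISA) lets the interruption
 * pending mask (IPM) be shared with the SIE hardware, which is why
 * kvm_s390_clear_float_irqs() and get_all_floating_irqs() above treat
 * it as an additional source of pending I/O interrupts alongside the
 * software-maintained floating-interrupt lists.
 */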