// SPDX-License-Identifier: GPL-2.0
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2020
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/nospec.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include <asm/airq.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

static struct kvm_s390_gib *gib;

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
		return 0;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (c && src_id)
		*src_id = scn;

	return c;
}

static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	return 0;
}

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	int rc, expect;

	if (!kvm_s390_use_sca_entries())
		return;
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	const u64 ckc = vcpu->arch.sie_block->ckc;

	if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
		if ((s64)ckc >= (s64)now)
			return 0;
	} else if (ckc >= now) {
		return 0;
	}
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u32 isc_to_int_word(u8 isc)
{
	return ((u32)isc << 27) | 0x80000000;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}

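/*
 * Worked example (illustration only, not from the original source): for
 * ISC 3, isc_to_isc_bits(3) = (0x80 >> 3) << 24 = 0x10000000, the ISC-3
 * subclass mask bit as used in CR6, while isc_to_int_word(3) =
 * (3 << 27) | 0x80000000 = 0x98000000, i.e. the adapter indicator bit
 * plus the ISC in the three bits covered by 0x38000000, which
 * int_word_to_isc() extracts again.
 */
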
/*
 * To use atomic bitmap functions, we have to provide a bitmap address
 * that is u64 aligned. However, the ipm might be u32 aligned.
 * Therefore, we logically start the bitmap at the very beginning of the
 * struct and fixup the bit number.
 */
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)

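/*
 * For illustration (layout assumption about struct kvm_s390_gisa): if the
 * ipm byte sits at byte offset 4 of the GISA, IPM_BIT_OFFSET is 32 and
 * gisa_set_ipm_gisc(gisa, 5) sets bit 32 + 5 = 37 in the MSB-0 numbering
 * used by the *_bit_inv() helpers, relative to the u64-aligned start of
 * the GISA - exactly bit 5 of the ipm byte.
 */
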
/**
 * gisa_set_iam - change the GISA interruption alert mask
 *
 * @gisa: gisa to operate on
 * @iam: new IAM value to use
 *
 * Change the IAM atomically with the next alert address and the IPM
 * of the GISA if the GISA is not part of the GIB alert list. All three
 * fields are located in the first long word of the GISA.
 *
 * Returns: 0 on success
 *          -EBUSY in case the gisa is part of the alert list
 */
static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
{
	u64 word, _word;

	do {
		word = READ_ONCE(gisa->u64.word[0]);
		if ((u64)gisa != word >> 32)
			return -EBUSY;
		_word = (word & ~0xffUL) | iam;
	} while (cmpxchg(&gisa->u64.word[0], word, _word) != word);

	return 0;
}

/**
 * gisa_clear_ipm - clear the GISA interruption pending mask
 *
 * @gisa: gisa to operate on
 *
 * Clear the IPM atomically with the next alert address and the IAM
 * of the GISA unconditionally. All three fields are located in the
 * first long word of the GISA.
 */
static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
{
	u64 word, _word;

	do {
		word = READ_ONCE(gisa->u64.word[0]);
		_word = word & ~(0xffUL << 24);
	} while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
}

/**
 * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM
 *
 * @gi: gisa interrupt struct to work on
 *
 * Atomically restores the interruption alert mask if none of the
 * relevant ISCs are pending and return the IPM.
 *
 * Returns: the relevant pending ISCs
 */
static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
{
	u8 pending_mask, alert_mask;
	u64 word, _word;

	do {
		word = READ_ONCE(gi->origin->u64.word[0]);
		alert_mask = READ_ONCE(gi->alert.mask);
		pending_mask = (u8)(word >> 24) & alert_mask;
		if (pending_mask)
			return pending_mask;
		_word = (word & ~0xffUL) | alert_mask;
	} while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word);

	return 0;
}

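/*
 * Note (derived from the accesses above): within the first long word of
 * the GISA, "word >> 32" is the next-alert address, "(u8)(word >> 24)"
 * is the IPM and "word & 0xff" is the IAM, which is why a single
 * cmpxchg() on u64.word[0] can test the alert-list linkage and update
 * IAM and IPM consistently.
 */
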
static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
{
	return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa;
}

static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
	return READ_ONCE(gisa->ipm);
}

static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
	unsigned long pending = vcpu->kvm->arch.float_int.pending_irqs |
				vcpu->arch.local_int.pending_irqs;

	pending &= ~vcpu->kvm->arch.float_int.masked_irqs;
	return pending;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
	unsigned long pending_mask;

	pending_mask = pending_irqs_no_gisa(vcpu);
	if (gi->origin)
		pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7;
	return pending_mask;
}

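/*
 * Note (derived from the helpers above): gisa_get_ipm() returns the eight
 * ISC pending bits with ISC 0 as the most significant bit of the byte.
 * Shifting that byte left by IRQ_PEND_IO_ISC_7 lines ISC 7 up with
 * IRQ_PEND_IO_ISC_7 and ISC 0 with IRQ_PEND_IO_ISC_0, matching
 * isc_to_irq_type() below.
 */
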
static inline int isc_to_irq_type(unsigned long isc)
{
	return IRQ_PEND_IO_ISC_0 - isc;
}

static inline int irq_type_to_isc(unsigned long irq_type)
{
	return IRQ_PEND_IO_ISC_0 - irq_type;
}

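/*
 * Both mappings are the same subtraction, so isc_to_irq_type() and
 * irq_type_to_isc() are each other's inverse: ISC 0 maps to
 * IRQ_PEND_IO_ISC_0 and ISC 7 to IRQ_PEND_IO_ISC_7. Lower ISC numbers
 * mean higher priority, while the pending-bit numbers grow with priority
 * (see the find_last_bit() loop in kvm_s390_deliver_pending_interrupts()).
 */
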
static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (isc_to_irq_type(i)));

	return active_mask;
}

static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) {
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
		__clear_bit(IRQ_PEND_EXT_SERVICE_EV, &active_mask);
	}
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	/* PV guest cpus can have a single interruption injected at a time. */
	if (kvm_s390_pv_cpu_get_handle(vcpu) &&
	    vcpu->arch.sie_block->iictl != IICTL_CODE_NONE)
		active_mask &= ~(IRQ_PEND_EXT_II_MASK |
				 IRQ_PEND_IO_MASK |
				 IRQ_PEND_MCHK_MASK);
	/*
	 * Check both floating and local interrupt's cr14 because
	 * bit IRQ_PEND_MCHK_REP could be set in both cases.
	 */
	if (!(vcpu->arch.sie_block->gcr[14] &
	      (vcpu->kvm->arch.float_int.mchk.cr14 |
	       vcpu->arch.local_int.irq.mchk.cr14)))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
				      CPUSTAT_STOP_INT);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
		return;
	if (psw_ioint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;

	vcpu->stat.deliver_cputm++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
		vcpu->arch.sie_block->eic = EXT_IRQ_CPU_TIMER;
	} else {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	}
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;

	vcpu->stat.deliver_ckc++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
		vcpu->arch.sie_block->eic = EXT_IRQ_CLK_COMP;
	} else {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
				   (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	}
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __write_machine_check(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mchk_info *mchk)
{
	unsigned long ext_sa_addr;
	unsigned long lc;
	freg_t fprs[NUM_FPRS];
	union mci mci;
	int rc;

	/*
	 * All other possible payload for a machine check (e.g. the register
	 * contents in the save area) will be handled by the ultravisor, as
	 * the hypervisor does not have the needed information for
	 * protected guests.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_MCHK;
		vcpu->arch.sie_block->mcic = mchk->mcic;
		vcpu->arch.sie_block->faddr = mchk->failing_storage_address;
		vcpu->arch.sie_block->edc = mchk->ext_damage_code;
		return 0;
	}

	mci.val = mchk->mcic;
	/* take care of lazy register loading */
	save_fpu_regs();
	save_access_regs(vcpu->run->s.regs.acrs);
	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
		save_gs_cb(current->thread.gs_cb);

	/* Extended save area */
	rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
			   sizeof(unsigned long));
	/* Only bits 0 through 63-LC are used for address formation */
	lc = ext_sa_addr & MCESA_LC_MASK;
	if (test_kvm_facility(vcpu->kvm, 133)) {
		switch (lc) {
		case 0:
		case 10:
			ext_sa_addr &= ~0x3ffUL;
			break;
		case 11:
			ext_sa_addr &= ~0x7ffUL;
			break;
		case 12:
			ext_sa_addr &= ~0xfffUL;
			break;
		default:
			ext_sa_addr = 0;
			break;
		}
	} else {
		ext_sa_addr &= ~0x3ffUL;
	}

	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
				    512))
			mci.vr = 0;
	} else {
		mci.vr = 0;
	}
	if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
	    && (lc == 11 || lc == 12)) {
		if (write_guest_abs(vcpu, ext_sa_addr + 1024,
				    &vcpu->run->s.regs.gscb, 32))
			mci.gs = 0;
	} else {
		mci.gs = 0;
	}

	/* General interruption information */
	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

	/* Register-save areas */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
	} else {
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
			     vcpu->run->s.regs.gprs, 128);
	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
			   (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
			   (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
			     &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
			     &vcpu->arch.sie_block->gcr, 128);

	/* Extended interruption information */
	rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
			   (u32 __user *) __LC_EXT_DAMAGE_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
			     sizeof(mchk->fixed_logout));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);
		vcpu->stat.deliver_machine_check++;
		rc = __write_machine_check(vcpu, &mchk);
	}
	return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_RESTART;
	} else {
		rc  = write_guest_lc(vcpu,
				     offsetof(struct lowcore, restart_old_psw),
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	}
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int cpu_addr;
	int rc;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
		vcpu->arch.sie_block->eic = EXT_IRQ_EMERGENCY_SIG;
		vcpu->arch.sie_block->extcpuaddr = cpu_addr;
		return 0;
	}

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
		vcpu->arch.sie_block->eic = EXT_IRQ_EXTERNAL_CALL;
		vcpu->arch.sie_block->extcpuaddr = extcall.code;
		return 0;
	}

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __deliver_prog_pv(struct kvm_vcpu *vcpu, u16 code)
{
	switch (code) {
	case PGM_SPECIFICATION:
		vcpu->arch.sie_block->iictl = IICTL_CODE_SPECIFICATION;
		break;
	case PGM_OPERAND:
		vcpu->arch.sie_block->iictl = IICTL_CODE_OPERAND;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	/* PER is handled by the ultravisor */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return __deliver_prog_pv(vcpu, pgm_info.code & ~PGM_PER);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		fallthrough;
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

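/*
 * Reader note (assumption, not stated in this file): ext_params of a
 * service-signal interrupt carries the SCCB address in the bits covered
 * by SCCB_MASK (the SCCB is 8-byte aligned, so the low bits are free)
 * and uses the low bits for the event-pending indication
 * (SCCB_EVENT_PENDING); __inject_service() below splits the two.
 */
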
static int write_sclp(struct kvm_vcpu *vcpu, u32 parm)
{
	int rc;

	if (kvm_s390_pv_cpu_get_handle(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
		vcpu->arch.sie_block->eic = EXT_IRQ_SERVICE_SIG;
		vcpu->arch.sie_block->eiparams = parm;
		return 0;
	}

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, parm,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;

	spin_lock(&fi->lock);
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs) ||
	    !(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		set_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	return write_sclp(vcpu, ext.ext_params);
}

static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	/* only clear the event bit */
	fi->srv_signal.ext_params &= ~SCCB_EVENT_PENDING;
	clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sclp parameter event");
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	return write_sclp(vcpu, SCCB_EVENT_PENDING);
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
	int rc;

	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_IO;
		vcpu->arch.sie_block->subchannel_id = io->subchannel_id;
		vcpu->arch.sie_block->subchannel_nr = io->subchannel_nr;
		vcpu->arch.sie_block->io_int_parm = io->io_int_parm;
		vcpu->arch.sie_block->io_int_word = io->io_int_word;
		return 0;
	}

	rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw,
			     sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
	struct kvm_s390_interrupt_info *inti = NULL;
	struct kvm_s390_io_info io;
	u32 isc;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc = irq_type_to_isc(irq_type);
	isc_list = &fi->lists[isc];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
		else
			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
				   inti->io.subchannel_id >> 8,
				   inti->io.subchannel_id >> 1 & 0x3,
				   inti->io.subchannel_nr);

		vcpu->stat.deliver_io++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
				inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
				inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = __do_deliver_io(vcpu, &(inti->io));
		kfree(inti);
		goto out;
	}

	if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) {
		/*
		 * in case an adapter interrupt was not delivered
		 * in SIE context KVM will handle the delivery
		 */
		VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
		memset(&io, 0, sizeof(io));
		io.io_int_word = isc_to_int_word(isc);
		vcpu->stat.deliver_io++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				KVM_S390_INT_IO(1, 0, 0, 0),
				((__u32)io.subchannel_id << 16) |
				io.subchannel_nr,
				((__u64)io.io_int_parm << 32) |
				io.io_int_word);
		rc = __do_deliver_io(vcpu, &io);
	}
out:
	return rc;
}

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	const u64 ckc = vcpu->arch.sie_block->ckc;
	u64 cputm, sltime = 0;

	if (ckc_interrupts_enabled(vcpu)) {
		if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
			if ((s64)now < (s64)ckc)
				sltime = tod_to_ns((s64)ckc - (s64)now);
		} else if (now < ckc) {
			sltime = tod_to_ns(ckc - now);
		}
		/* already expired */
		if (!sltime)
			return 0;
		if (cpu_timer_interrupts_enabled(vcpu)) {
			cputm = kvm_s390_get_cpu_timer(vcpu);
			/* already expired? */
			if (cputm >> 63)
				return 0;
			return min(sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);
		/* already expired? */
		if (sltime >> 63)
			return 0;
	}
	return sltime;
}

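/*
 * In short: __calculate_sltime() returns how long the vcpu may sleep,
 * i.e. the smaller of the time until the clock comparator fires and the
 * time until the CPU timer expires, considering only the sources whose
 * interrupts are currently enabled; 0 means there is nothing to wait for
 * or one of the enabled sources has already expired.
 */
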
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
	u64 sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (gi->origin &&
	    (gisa_get_ipm_or_restore_iam(gi) &
	     vcpu->arch.sie_block->gcr[6] >> 24))
		return 0;

	if (!ckc_interrupts_enabled(vcpu) &&
	    !cpu_timer_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	sltime = __calculate_sltime(vcpu);
	if (!sltime)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = true;
	kvm_vcpu_wake_up(vcpu);

	/*
	 * The VCPU might not be sleeping but rather executing VSIE. Let's
	 * kick it, so it leaves the SIE to process the request.
	 */
	kvm_s390_vsie_kick(vcpu);
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	sltime = __calculate_sltime(vcpu);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	sca_clear_ext_call(vcpu);
}

int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the reverse order of interrupt priority */
		irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
		switch (irq_type) {
		case IRQ_PEND_IO_ISC_0:
		case IRQ_PEND_IO_ISC_1:
		case IRQ_PEND_IO_ISC_2:
		case IRQ_PEND_IO_ISC_3:
		case IRQ_PEND_IO_ISC_4:
		case IRQ_PEND_IO_ISC_5:
		case IRQ_PEND_IO_ISC_6:
		case IRQ_PEND_IO_ISC_7:
			rc = __deliver_io(vcpu, irq_type);
			break;
		case IRQ_PEND_MCHK_EX:
		case IRQ_PEND_MCHK_REP:
			rc = __deliver_machine_check(vcpu);
			break;
		case IRQ_PEND_PROG:
			rc = __deliver_prog(vcpu);
			break;
		case IRQ_PEND_EXT_EMERGENCY:
			rc = __deliver_emergency_signal(vcpu);
			break;
		case IRQ_PEND_EXT_EXTERNAL:
			rc = __deliver_external_call(vcpu);
			break;
		case IRQ_PEND_EXT_CLOCK_COMP:
			rc = __deliver_ckc(vcpu);
			break;
		case IRQ_PEND_EXT_CPU_TIMER:
			rc = __deliver_cpu_timer(vcpu);
			break;
		case IRQ_PEND_RESTART:
			rc = __deliver_restart(vcpu);
			break;
		case IRQ_PEND_SET_PREFIX:
			rc = __deliver_set_prefix(vcpu);
			break;
		case IRQ_PEND_PFAULT_INIT:
			rc = __deliver_pfault_init(vcpu);
			break;
		case IRQ_PEND_EXT_SERVICE:
			rc = __deliver_service(vcpu);
			break;
		case IRQ_PEND_EXT_SERVICE_EV:
			rc = __deliver_service_ev(vcpu);
			break;
		case IRQ_PEND_PFAULT_DONE:
			rc = __deliver_pfault_done(vcpu);
			break;
		case IRQ_PEND_VIRTIO:
			rc = __deliver_virtio(vcpu);
			break;
		default:
			WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
			clear_bit(irq_type, &li->pending_irqs);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_program++;
	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
		/* auto detection if no valid ILC was given */
		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
	}

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		li->irq.pgm = irq->u.pgm;
	}
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_pfault_init++;
	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	vcpu->stat.inject_external_call++;
	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif && !kvm_s390_pv_cpu_get_handle(vcpu))
		return sca_inject_ext_call(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	vcpu->stat.inject_set_prefix++;
	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	vcpu->stat.inject_stop_signal++;
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_restart++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_emergency_signal++;
	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	vcpu->stat.inject_mchk++;
	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_ckc++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_cputm++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  u64 isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}

static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
						      u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}

static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	unsigned long active_mask;
	int isc;

	if (schid)
		goto out;
	if (!gi->origin)
		goto out;

	active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32;
	while (active_mask) {
		isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
		if (gisa_tac_ipm_gisc(gi->origin, isc))
			return isc;
		clear_bit_inv(isc, &active_mask);
	}
out:
	return -EINVAL;
}

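/*
 * Note on the bit fiddling above: the IPM byte is placed at the top of
 * active_mask ("<< 24 << 32"), so __fls() finds the most significant set
 * bit, and XORing its position with BITS_PER_LONG - 1 converts it to the
 * MSB-0 numbering used by the *_bit_inv() helpers, i.e. the lowest
 * pending ISC number and thus the highest priority one (ISC 0 lands at
 * bit 63, 63 ^ 63 = 0; ISC 7 lands at bit 56, 56 ^ 63 = 7).
 */
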
/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 * Take into account the interrupts pending in the interrupt list and in GISA.
 *
 * Note that for a guest that does not enable I/O interrupts
 * but relies on TPI, a flood of classic interrupts may starve
 * out adapter interrupts on the same isc. Linux does not do
 * that, and it is possible to work around the issue by configuring
 * different iscs for classic and adapter interrupts in the guest,
 * but we may want to revisit this in the future.
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_s390_interrupt_info *inti, *tmp_inti;
	int isc;

	inti = get_top_io_int(kvm, isc_mask, schid);

	isc = get_top_gisa_isc(kvm, isc_mask, schid);
	if (isc < 0)
		/* no AI in GISA */
		goto out;

	if (!inti)
		/* AI in GISA but no classical IO int */
		goto gisa_out;

	/* both types of interrupts present */
	if (int_word_to_isc(inti->io.io_int_word) <= isc) {
		/* classical IO int with higher priority */
		gisa_set_ipm_gisc(gi->origin, isc);
		goto out;
	}
gisa_out:
	tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (tmp_inti) {
		tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
		tmp_inti->io.io_int_word = isc_to_int_word(isc);
		if (inti)
			kvm_s390_reinject_io_int(kvm, inti);
		inti = tmp_inti;
	} else
		gisa_set_ipm_gisc(gi->origin, isc);
out:
	return inti;
}

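/*
 * Priority note (derived from the code above): int_word_to_isc(...) <= isc
 * means the classical (list) interrupt belongs to the same or a lower ISC
 * number, i.e. the same or higher priority, so the GISA bit is put back
 * via gisa_set_ipm_gisc() and the classical interrupt is returned;
 * otherwise an interrupt_info is allocated on the fly to represent the
 * adapter interrupt taken from the GISA, and any classical interrupt that
 * was dequeued is re-injected.
 */
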
static int __inject_service(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_service_signal++;
	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;

	/* We always allow events, track them separately from the sccb ints */
	if (fi->srv_signal.ext_params & SCCB_EVENT_PENDING)
		set_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);

	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts after another without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * and we will remove the workaround.
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_virtio(struct kvm *kvm,
			   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_virtio++;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int __inject_pfault_done(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_pfault_done++;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
	    (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
			       struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_float_mchk++;
	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	kvm->stat.inject_io++;
	isc = int_word_to_isc(inti->io.io_int_word);

	/*
	 * Do not make use of gisa in protected mode. We do not use the lock
	 * checking variant as this is just a performance optimization and we
	 * do not hold the lock here. This is ok as the code will pick
	 * interrupts from both "lists" for delivery.
	 */
	if (!kvm_s390_pv_get_handle(kvm) &&
	    gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
		VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
		gisa_set_ipm_gisc(gi->origin, isc);
		kfree(inti);
		return 0;
	}

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	if (inti->type & KVM_S390_INT_IO_AI_MASK)
		VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
	else
		VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
			 inti->io.subchannel_id >> 8,
			 inti->io.subchannel_id >> 1 & 0x3,
			 inti->io.subchannel_nr);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = kvm->arch.float_int.next_rr_cpu++;
			kvm->arch.float_int.next_rr_cpu %= online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	switch (type) {
	case KVM_S390_MCHK:
		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (!(type & KVM_S390_INT_IO_AI_MASK &&
		      kvm->arch.gisa_int.origin) ||
		      kvm_s390_pv_cpu_get_handle(dst_vcpu))
			kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
		break;
	default:
		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
		break;
	}
	kvm_s390_vcpu_wakeup(dst_vcpu);
}

static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	u64 type = READ_ONCE(inti->type);
	int rc;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	__floating_irq_kick(kvm, type);
	return 0;
}

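/*
 * Entry point for the KVM_S390_INTERRUPT VM ioctl: convert the legacy
 * kvm_s390_interrupt layout (type/parm/parm64) into a freshly allocated
 * kvm_s390_interrupt_info and hand it to __inject_vm().
 */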
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}

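/*
 * Convert the legacy kvm_s390_interrupt layout into a kvm_s390_irq for
 * vcpu-local injection; unknown types and payloads that do not fit into
 * parm/parm64 are rejected with -EINVAL.
 */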
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_PFAULT_INIT:
		irq->u.ext.ext_params = s390int->parm;
		irq->u.ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_RESTART:
	case KVM_S390_INT_CLOCK_COMP:
	case KVM_S390_INT_CPU_TIMER:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}

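/* Must be called with the local interrupt lock (li->lock) held. */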
static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}

static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	mutex_lock(&kvm->lock);
	if (!kvm_s390_pv_is_protected(kvm))
		fi->masked_irqs = 0;
	mutex_unlock(&kvm->lock);
	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
	kvm_s390_gisa_clear(kvm);
}

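/*
 * Snapshot all pending floating interrupts for KVM_DEV_FLIC_GET_ALL_IRQS:
 * first the adapter interrupts latched in the GISA IPM, then the classic
 * FLIC lists, then the service signal and the repressible machine check.
 * -ENOMEM tells userspace to retry with a larger buffer.
 */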
static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	if (gi->origin && gisa_get_ipm(gi->origin)) {
		for (i = 0; i <= MAX_ISC; i++) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out_nolock;
			}
			if (gisa_tac_ipm_gisc(gi->origin, i)) {
				irq = (struct kvm_s390_irq *) &buf[n];
				irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
				irq->u.io.io_int_word = isc_to_int_word(i);
				n++;
			}
		}
	}
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs) ||
	    test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}

out:
	spin_unlock(&fi->lock);
out_nolock:
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);
	return ret < 0 ? ret : n;
}

static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_all ais;

	if (attr->attr < sizeof(ais))
		return -EINVAL;

	if (!test_kvm_facility(kvm, 72))
		return -EOPNOTSUPP;

	mutex_lock(&fi->ais_lock);
	ais.simm = fi->simm;
	ais.nimm = fi->nimm;
	mutex_unlock(&fi->ais_lock);

	if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
		return -EFAULT;

	return 0;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
					  attr->attr);
		break;
	case KVM_DEV_FLIC_AISM_ALL:
		r = flic_ais_mode_get_all(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

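/*
 * Adapters are kept in a fixed-size array indexed by adapter id;
 * array_index_nospec() clamps the id under speculation before the lookup.
 */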
static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	id = array_index_nospec(id, MAX_S390_IO_ADAPTERS);
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if (adapter_info.id >= MAX_S390_IO_ADAPTERS)
		return -EINVAL;

	adapter_info.id = array_index_nospec(adapter_info.id,
					     MAX_S390_IO_ADAPTERS);

	if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	adapter->suppressible = (adapter_info.flags) &
				KVM_S390_ADAPTER_SUPPRESSIBLE;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++)
		kfree(kvm->arch.adapters[i]);
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	/*
	 * The following operations are no longer needed and therefore no-ops.
	 * The gpa to hva translation is done when an IRQ route is set up. The
	 * set_irq code uses get_user_pages_remote() to do the actual write.
	 */
	case KVM_S390_IO_ADAPTER_MAP:
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
	u32 schid;

	if (attr->attr != sizeof(schid))
		return -EINVAL;
	if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
		return -EFAULT;
	if (!schid)
		return -EINVAL;
	kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
	/*
	 * If userspace is conforming to the architecture, we can have at most
	 * one pending I/O interrupt per subchannel, so this is effectively a
	 * clear all.
	 */
	return 0;
}

static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_req req;
	int ret = 0;

	if (!test_kvm_facility(kvm, 72))
		return -EOPNOTSUPP;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	if (req.isc > MAX_ISC)
		return -EINVAL;

	trace_kvm_s390_modify_ais_mode(req.isc,
				       (fi->simm & AIS_MODE_MASK(req.isc)) ?
				       (fi->nimm & AIS_MODE_MASK(req.isc)) ?
				       2 : KVM_S390_AIS_MODE_SINGLE :
				       KVM_S390_AIS_MODE_ALL, req.mode);

	mutex_lock(&fi->ais_lock);
	switch (req.mode) {
	case KVM_S390_AIS_MODE_ALL:
		fi->simm &= ~AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	case KVM_S390_AIS_MODE_SINGLE:
		fi->simm |= AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&fi->ais_lock);

	return ret;
}

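/*
 * Inject an adapter interrupt. With the AIS facility (facility 72) and a
 * suppressible adapter, single-interruption mode is honoured: once an
 * interrupt for the ISC has been delivered, the no-interruptions bit is
 * set and further injections are suppressed until the mode is reset.
 */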
static int kvm_s390_inject_airq(struct kvm *kvm,
				struct s390_io_adapter *adapter)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_IO(1, 0, 0, 0),
		.parm = 0,
		.parm64 = isc_to_int_word(adapter->isc),
	};
	int ret = 0;

	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
		return kvm_s390_inject_vm(kvm, &s390int);

	mutex_lock(&fi->ais_lock);
	if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
		trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
		goto out;
	}

	ret = kvm_s390_inject_vm(kvm, &s390int);
	if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
		fi->nimm |= AIS_MODE_MASK(adapter->isc);
		trace_kvm_s390_modify_ais_mode(adapter->isc,
					       KVM_S390_AIS_MODE_SINGLE, 2);
	}
out:
	mutex_unlock(&fi->ais_lock);
	return ret;
}

static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	unsigned int id = attr->attr;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter)
		return -EINVAL;

	return kvm_s390_inject_airq(kvm, adapter);
}

static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_all ais;

	if (!test_kvm_facility(kvm, 72))
		return -EOPNOTSUPP;

	if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
		return -EFAULT;

	mutex_lock(&fi->ais_lock);
	fi->simm = ais.simm;
	fi->nimm = ais.nimm;
	mutex_unlock(&fi->ais_lock);

	return 0;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		r = clear_io_irq(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AISM:
		r = modify_ais_mode(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AIRQ_INJECT:
		r = flic_inject_airq(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AISM_ALL:
		r = flic_ais_mode_set_all(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_has_attr(struct kvm_device *dev,
			 struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
	case KVM_DEV_FLIC_ENQUEUE:
	case KVM_DEV_FLIC_CLEAR_IRQS:
	case KVM_DEV_FLIC_APF_ENABLE:
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
	case KVM_DEV_FLIC_AISM:
	case KVM_DEV_FLIC_AIRQ_INJECT:
	case KVM_DEV_FLIC_AISM_ALL:
		return 0;
	}
	return -ENXIO;
}

*dev
, u32 type
)
2735 if (dev
->kvm
->arch
.flic
)
2737 dev
->kvm
->arch
.flic
= dev
;
2741 static void flic_destroy(struct kvm_device
*dev
)
2743 dev
->kvm
->arch
.flic
= NULL
;
/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.has_attr = flic_has_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};

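/*
 * Translate an indicator address and bit number into a bit index within
 * the containing page; for swapped adapters the index is mirrored within
 * the 64-bit word to convert between MSB-first and LSB-first numbering.
 */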
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
{
	struct page *page = NULL;

	down_read(&kvm->mm->mmap_sem);
	get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE,
			      &page, NULL, NULL);
	up_read(&kvm->mm->mmap_sem);
	return page;
}

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct page *ind_page, *summary_page;
	void *map;

	ind_page = get_map_page(kvm, adapter_int->ind_addr);
	if (!ind_page)
		return -1;
	summary_page = get_map_page(kvm, adapter_int->summary_addr);
	if (!summary_page) {
		put_page(ind_page);
		return -1;
	}

	idx = srcu_read_lock(&kvm->srcu);
	map = page_address(ind_page);
	bit = get_ind_bit(adapter_int->ind_addr,
			  adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	mark_page_dirty(kvm, adapter_int->ind_addr >> PAGE_SHIFT);
	set_page_dirty_lock(ind_page);
	map = page_address(summary_page);
	bit = get_ind_bit(adapter_int->summary_addr,
			  adapter_int->summary_offset, adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, adapter_int->summary_addr >> PAGE_SHIFT);
	set_page_dirty_lock(summary_page);
	srcu_read_unlock(&kvm->srcu, idx);

	put_page(ind_page);
	put_page(summary_page);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	if ((ret > 0) && !adapter->masked) {
		ret = kvm_s390_inject_airq(kvm, adapter);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

/*
 * Inject the machine check to the guest.
 */
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info)
{
	struct kvm_s390_interrupt_info inti;
	struct kvm_s390_irq irq;
	struct kvm_s390_mchk_info *mchk;
	union mci mci;
	__u64 cr14 = 0; /* upper bits are not used */
	int rc;

	mci.val = mcck_info->mcic;
	if (mci.sr)
		cr14 |= CR14_RECOVERY_SUBMASK;
	if (mci.dg)
		cr14 |= CR14_DEGRADATION_SUBMASK;
	if (mci.w)
		cr14 |= CR14_WARNING_SUBMASK;

	mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
	mchk->cr14 = cr14;
	mchk->mcic = mcck_info->mcic;
	mchk->ext_damage_code = mcck_info->ext_damage_code;
	mchk->failing_storage_address = mcck_info->failing_storage_address;
	if (mci.ck) {
		/* Inject the floating machine check */
		inti.type = KVM_S390_MCHK;
		rc = __inject_vm(vcpu->kvm, &inti);
	} else {
		/* Inject the machine check to specified vcpu */
		irq.type = KVM_S390_MCHK;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
	}
	WARN_ON_ONCE(rc);
}

int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	u64 uaddr;

	switch (ue->type) {
	/* we store the userspace addresses instead of the guest addresses */
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
		if (uaddr == -EFAULT)
			return -EFAULT;
		e->adapter.summary_addr = uaddr;
		uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr);
		if (uaddr == -EFAULT)
			return -EFAULT;
		e->adapter.ind_addr = uaddr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		return 0;
	default:
		return -EINVAL;
	}
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}

int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}

static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}

int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}

static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
{
	int vcpu_id, online_vcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_vcpu *vcpu;

	for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
		vcpu = kvm_get_vcpu(kvm, vcpu_id);
		if (psw_ioint_disabled(vcpu))
			continue;
		deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
		if (deliverable_mask) {
			/* lately kicked but not yet running */
			if (test_and_set_bit(vcpu_id, gi->kicked_mask))
				return;
			kvm_s390_vcpu_wakeup(vcpu);
			return;
		}
	}
}

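/*
 * Timer callback armed from the GIB alert handling: kick one idle VCPU
 * that can take the pending ISCs and re-arm as long as interruptions
 * remain pending in the IPM.
 */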
static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer)
{
	struct kvm_s390_gisa_interrupt *gi =
		container_of(timer, struct kvm_s390_gisa_interrupt, timer);
	struct kvm *kvm =
		container_of(gi->origin, struct sie_page2, gisa)->kvm;
	u8 pending_mask;

	pending_mask = gisa_get_ipm_or_restore_iam(gi);
	if (pending_mask) {
		__airqs_kick_single_vcpu(kvm, pending_mask);
		hrtimer_forward_now(timer, ns_to_ktime(gi->expires));
		return HRTIMER_RESTART;
	}

	return HRTIMER_NORESTART;
}

#define NULL_GISA_ADDR 0x00000000UL
#define NONE_GISA_ADDR 0x00000001UL
#define GISA_ADDR_MASK 0xfffff000UL

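/*
 * The GIB alert list origin holds either NULL_GISA_ADDR (no alerts, GAL
 * interruptions enabled), NONE_GISA_ADDR (a list is currently being
 * processed) or the address of the first GISA in the alert list.
 */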
static void process_gib_alert_list(void)
{
	struct kvm_s390_gisa_interrupt *gi;
	struct kvm_s390_gisa *gisa;
	struct kvm *kvm;
	u32 final, origin = 0UL;

	do {
		/*
		 * If the NONE_GISA_ADDR is still stored in the alert list
		 * origin, we will leave the outer loop. No further GISA has
		 * been added to the alert list by millicode while processing
		 * the current alert list.
		 */
		final = (origin & NONE_GISA_ADDR);
		/*
		 * Cut off the alert list and store the NONE_GISA_ADDR in the
		 * alert list origin to avoid further GAL interruptions.
		 * A new alert list can be build up by millicode in parallel
		 * for guests not in the yet cut-off alert list. When in the
		 * final loop, store the NULL_GISA_ADDR instead. This will re-
		 * enable GAL interruptions on the host again.
		 */
		origin = xchg(&gib->alert_list_origin,
			      (!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR);
		/*
		 * Loop through the just cut-off alert list and start the
		 * gisa timers to kick idle vcpus to consume the pending
		 * interruptions asap.
		 */
		while (origin & GISA_ADDR_MASK) {
			gisa = (struct kvm_s390_gisa *)(u64)origin;
			origin = gisa->next_alert;
			gisa->next_alert = (u32)(u64)gisa;
			kvm = container_of(gisa, struct sie_page2, gisa)->kvm;
			gi = &kvm->arch.gisa_int;
			if (hrtimer_active(&gi->timer))
				hrtimer_cancel(&gi->timer);
			hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
		}
	} while (!final);
}

void kvm_s390_gisa_clear(struct kvm *kvm)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;

	if (!gi->origin)
		return;
	gisa_clear_ipm(gi->origin);
	VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
}

void kvm_s390_gisa_init(struct kvm *kvm)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;

	if (!css_general_characteristics.aiv)
		return;
	gi->origin = &kvm->arch.sie_page2->gisa;
	gi->alert.mask = 0;
	spin_lock_init(&gi->alert.ref_lock);
	gi->expires = 50 * 1000; /* 50 usec */
	hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	gi->timer.function = gisa_vcpu_kicker;
	memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
	gi->origin->next_alert = (u32)(u64)gi->origin;
	VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
}

void kvm_s390_gisa_destroy(struct kvm *kvm)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;

	if (!gi->origin)
		return;
	if (gi->alert.mask)
		KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
			  kvm, gi->alert.mask);
	while (gisa_in_alert_list(gi->origin))
		cpu_relax();
	hrtimer_cancel(&gi->timer);
	gi->origin = NULL;
}

/**
 * kvm_s390_gisc_register - register a guest ISC
 *
 * @kvm:  the kernel vm to work with
 * @gisc: the guest interruption sub class to register
 *
 * The function extends the vm specific alert mask to use.
 * The effective IAM mask in the GISA is updated as well
 * in case the GISA is not part of the GIB alert list.
 * It will be updated latest when the IAM gets restored
 * by gisa_get_ipm_or_restore_iam().
 *
 * Returns: the nonspecific ISC (NISC) the gib alert mechanism
 *          has registered with the channel subsystem.
 *          -ENODEV in case the vm uses no GISA
 *          -ERANGE in case the guest ISC is invalid
 */
int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;

	if (!gi->origin)
		return -ENODEV;
	if (gisc > MAX_ISC)
		return -ERANGE;

	spin_lock(&gi->alert.ref_lock);
	gi->alert.ref_count[gisc]++;
	if (gi->alert.ref_count[gisc] == 1) {
		gi->alert.mask |= 0x80 >> gisc;
		gisa_set_iam(gi->origin, gi->alert.mask);
	}
	spin_unlock(&gi->alert.ref_lock);

	return gib->nisc;
}
EXPORT_SYMBOL_GPL(kvm_s390_gisc_register);

/**
 * kvm_s390_gisc_unregister - unregister a guest ISC
 *
 * @kvm:  the kernel vm to work with
 * @gisc: the guest interruption sub class to register
 *
 * The function reduces the vm specific alert mask to use.
 * The effective IAM mask in the GISA is updated as well
 * in case the GISA is not part of the GIB alert list.
 * It will be updated latest when the IAM gets restored
 * by gisa_get_ipm_or_restore_iam().
 *
 * Returns: the nonspecific ISC (NISC) the gib alert mechanism
 *          has registered with the channel subsystem.
 *          -ENODEV in case the vm uses no GISA
 *          -ERANGE in case the guest ISC is invalid
 *          -EINVAL in case the guest ISC is not registered
 */
int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	int rc = 0;

	if (!gi->origin)
		return -ENODEV;
	if (gisc > MAX_ISC)
		return -ERANGE;

	spin_lock(&gi->alert.ref_lock);
	if (gi->alert.ref_count[gisc] == 0) {
		rc = -EINVAL;
		goto out;
	}
	gi->alert.ref_count[gisc]--;
	if (gi->alert.ref_count[gisc] == 0) {
		gi->alert.mask &= ~(0x80 >> gisc);
		gisa_set_iam(gi->origin, gi->alert.mask);
	}
out:
	spin_unlock(&gi->alert.ref_lock);

	return rc;
}
EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister);

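/*
 * Illustrative usage only (not code from this file): a device backend
 * that forwards adapter interrupts directly into the GISA would bracket
 * the lifetime of its interrupt source roughly like this, where "gisc"
 * is the guest ISC it was configured with:
 *
 *	nisc = kvm_s390_gisc_register(kvm, gisc);
 *	if (nisc < 0)
 *		return nisc;	// no GISA, fall back to classic injection
 *	...
 *	kvm_s390_gisc_unregister(kvm, gisc);
 */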
static void gib_alert_irq_handler(struct airq_struct *airq, bool floating)
{
	inc_irq_stat(IRQIO_GAL);
	process_gib_alert_list();
}

static struct airq_struct gib_alert_irq = {
	.handler = gib_alert_irq_handler,
	.lsi_ptr = &gib_alert_irq.lsi_mask,
};

void kvm_s390_gib_destroy(void)
{
	if (!gib)
		return;
	unregister_adapter_interrupt(&gib_alert_irq);
	free_page((unsigned long)gib);
	gib = NULL;
}

int kvm_s390_gib_init(u8 nisc)
{
	int rc = 0;

	if (!css_general_characteristics.aiv) {
		KVM_EVENT(3, "%s", "gib not initialized, no AIV facility");
		goto out;
	}

	gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!gib) {
		rc = -ENOMEM;
		goto out;
	}

	gib_alert_irq.isc = nisc;
	if (register_adapter_interrupt(&gib_alert_irq)) {
		pr_err("Registering the GIB alert interruption handler failed\n");
		rc = -EIO;
		goto out_free_gib;
	}

	gib->nisc = nisc;
	if (chsc_sgib((u32)(u64)gib)) {
		pr_err("Associating the GIB with the AIV facility failed\n");
		free_page((unsigned long)gib);
		gib = NULL;
		rc = -EIO;
		goto out_unreg_gal;
	}

	KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
	goto out;

out_unreg_gal:
	unregister_adapter_interrupt(&gib_alert_irq);
out_free_gib:
	free_page((unsigned long)gib);
	gib = NULL;
out:
	return rc;
}