// SPDX-License-Identifier: GPL-2.0
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <linux/uaccess.h>
#include <asm/switch_to.h>
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
		return 0;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}
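
/*
 * Try to record an external call in the vcpu's SIGP control entry of the
 * (extended or basic) SCA via cmpxchg; fails if another external call is
 * already pending there.
 */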
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	return 0;
}
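
/* Drop a previously recorded external call from the vcpu's SCA entry. */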
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	int rc, expect;

	if (!kvm_s390_use_sca_entries())
		return;
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect); /* cannot clear? */
}
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}
static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
		return 0;
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}
static inline int is_ioirq(unsigned long irq_type)
{
	return ((irq_type >= IRQ_PEND_IO_ISC_7) &&
		(irq_type <= IRQ_PEND_IO_ISC_0));
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u32 isc_to_int_word(u8 isc)
{
	return ((u32)isc << 27) | 0x80000000;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}
/*
 * To use atomic bitmap functions, we have to provide a bitmap address
 * that is u64 aligned. However, the ipm might be u32 aligned.
 * Therefore, we logically start the bitmap at the very beginning of the
 * struct and fixup the bit number.
 */
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)

static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
	return READ_ONCE(gisa->ipm);
}

static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs |
		vcpu->arch.local_int.pending_irqs |
		kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
}

static inline int isc_to_irq_type(unsigned long isc)
{
	return IRQ_PEND_IO_ISC_0 - isc;
}

static inline int irq_type_to_isc(unsigned long irq_type)
{
	return IRQ_PEND_IO_ISC_0 - irq_type;
}
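
/*
 * Mask out the I/O interrupt subclasses that are not enabled in the guest's
 * CR6 from the set of pending interrupt bits.
 */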
static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (isc_to_irq_type(i)));

	return active_mask;
}

static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	/*
	 * Check both floating and local interrupt's cr14 because
	 * bit IRQ_PEND_MCHK_REP could be set in both cases.
	 */
	if (!(vcpu->arch.sie_block->gcr[14] &
	   (vcpu->kvm->arch.float_int.mchk.cr14 |
	   vcpu->arch.local_int.irq.mchk.cr14)))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
				      CPUSTAT_STOP_INT);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}
static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}
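
/*
 * The __deliver_* helpers below present one pending interrupt to the guest:
 * they store the interruption code and parameters into the guest lowcore,
 * save the old PSW and load the new PSW for that interrupt class.
 */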
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}
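
/*
 * Write the machine-check interruption code, the register save areas and,
 * where the needed facilities are available, the extended save area into
 * the guest lowcore/absolute storage.
 */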
static int __write_machine_check(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mchk_info *mchk)
{
	unsigned long ext_sa_addr;
	unsigned long lc;
	freg_t fprs[NUM_FPRS];
	union mci mci;
	int rc;

	mci.val = mchk->mcic;
	/* take care of lazy register loading */
	save_fpu_regs();
	save_access_regs(vcpu->run->s.regs.acrs);
	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
		save_gs_cb(current->thread.gs_cb);

	/* Extended save area */
	rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
			   sizeof(unsigned long));
	/* Only bits 0 through 63-LC are used for address formation */
	lc = ext_sa_addr & MCESA_LC_MASK;
	if (test_kvm_facility(vcpu->kvm, 133)) {
		switch (lc) {
		case 0:
		case 10:
			ext_sa_addr &= ~0x3ffUL;
			break;
		case 11:
			ext_sa_addr &= ~0x7ffUL;
			break;
		case 12:
			ext_sa_addr &= ~0xfffUL;
			break;
		default:
			ext_sa_addr = 0;
			break;
		}
	} else {
		ext_sa_addr &= ~0x3ffUL;
	}

	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
				    512))
			mci.vr = 0;
	} else {
		mci.vr = 0;
	}
	if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
	    && (lc == 11 || lc == 12)) {
		if (write_guest_abs(vcpu, ext_sa_addr + 1024,
				    &vcpu->run->s.regs.gscb, 32))
			mci.gs = 0;
	} else {
		mci.gs = 0;
	}

	/* General interruption information */
	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

	/* Register-save areas */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
	} else {
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
			     vcpu->run->s.regs.gprs, 128);
	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
			   (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
			   (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
			     &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
			     &vcpu->arch.sie_block->gcr, 128);

	/* Extended interruption information */
	rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
			   (u32 __user *) __LC_EXT_DAMAGE_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
			     sizeof(mchk->fixed_logout));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);
		rc = __write_machine_check(vcpu, &mchk);
	}
	return rc;
}
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 3, "deliver: set prefix to %x", prefix.address);
	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}
static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int cpu_addr;
	int rc;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
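
/*
 * Deliver a program interrupt: store the exception-specific fields that
 * belong to the interruption code, optionally rewind the PSW for
 * nullifying conditions, then swap the program old/new PSWs.
 */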
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);

		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_VIRTIO,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);

		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}
static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
	int rc;

	rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw,
			     sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
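
/*
 * Deliver the highest-priority pending I/O interrupt for the given
 * IRQ_PEND_IO_ISC_* type, either from the floating interrupt lists or,
 * failing that, from the GISA interruption pending mask.
 */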
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	struct kvm_s390_io_info io;
	u32 isc;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc = irq_type_to_isc(irq_type);
	isc_list = &fi->lists[isc];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
		else
			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
				   inti->io.subchannel_id >> 8,
				   inti->io.subchannel_id >> 1 & 0x3,
				   inti->io.subchannel_nr);

		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
					inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
					inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = __do_deliver_io(vcpu, &(inti->io));
		kfree(inti);
		goto out;
	}

	if (vcpu->kvm->arch.gisa &&
	    kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) {
		/*
		 * in case an adapter interrupt was not delivered
		 * in SIE context KVM will handle the delivery
		 */
		VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
		memset(&io, 0, sizeof(io));
		io.io_int_word = isc_to_int_word(isc);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
			KVM_S390_INT_IO(1, 0, 0, 0),
			((__u32)io.subchannel_id << 16) |
			io.subchannel_nr,
			((__u64)io.io_int_parm << 32) |
			io.io_int_word);
		rc = __do_deliver_io(vcpu, &io);
	}
out:
	return rc;
}
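
/* Per-irq-type delivery callbacks, indexed by IRQ_PEND_* bit number. */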
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
	[IRQ_PEND_EXT_SERVICE]    = __deliver_service,
	[IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
	[IRQ_PEND_VIRTIO]         = __deliver_virtio,
};
/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
	u64 now, cputm, sltime = 0;

	if (ckc_interrupts_enabled(vcpu)) {
		now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
		sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
		/* already expired or overflow? */
		if (!sltime || vcpu->arch.sie_block->ckc <= now)
			return 0;
		if (cpu_timer_interrupts_enabled(vcpu)) {
			cputm = kvm_s390_get_cpu_timer(vcpu);
			/* already expired? */
			if (cputm >> 63)
				return 0;
			return min(sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);
		/* already expired? */
		if (sltime >> 63)
			return 0;
	}
	return sltime;
}
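
/*
 * Handle a guest wait state: bail out for disabled wait, otherwise go idle,
 * arming the ckc hrtimer when a clock comparator or cpu timer wakeup is due.
 */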
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu) &&
	    !cpu_timer_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	sltime = __calculate_sltime(vcpu);
	if (!sltime)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	/*
	 * We cannot move this into the if, as the CPU might be already
	 * in kvm_vcpu_block without having the waitqueue set (polling)
	 */
	vcpu->valid_wakeup = true;
	/*
	 * This is mostly to document, that the read in swait_active could
	 * be moved before other stores, leading to subtle races.
	 * All current users do not store or use an atomic like update
	 */
	smp_mb__after_atomic();
	if (swait_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		swake_up(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
		return;
	}
	/*
	 * The VCPU might not be sleeping but is executing the VSIE. Let's
	 * kick it, so it leaves the SIE to process the request.
	 */
	kvm_s390_vsie_kick(vcpu);
}
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	sltime = __calculate_sltime(vcpu);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	sca_clear_ext_call(vcpu);
}
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	deliver_irq_t func;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the reverse order of interrupt priority */
		irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
		if (is_ioirq(irq_type)) {
			rc = __deliver_io(vcpu, irq_type);
		} else {
			func = deliver_irq_funcs[irq_type];
			if (unlikely(!func)) {
				WARN_ON_ONCE(func == NULL);
				clear_bit(irq_type, &li->pending_irqs);
				continue;
			}
			rc = func(vcpu);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}
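
/*
 * The __inject_* helpers below queue an interrupt in the vcpu's local
 * interrupt state and set the matching IRQ_PEND_* bit; actual delivery
 * happens later in kvm_s390_deliver_pending_interrupts().
 */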
static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
		/* auto detection if no valid ILC was given */
		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
	}

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		li->irq.pgm = irq->u.pgm;
	}

	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}
static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif)
		return sca_inject_ext_call(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}
#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	return 0;
}
static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}
static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}
static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  int isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}
static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
						      u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}
static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
{
	unsigned long active_mask;
	int isc;

	if (schid)
		goto out;
	if (!kvm->arch.gisa)
		goto out;

	active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32;
	while (active_mask) {
		isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
		if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc))
			return isc;
		clear_bit_inv(isc, &active_mask);
	}
out:
	return -EINVAL;
}
/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 * Take into account the interrupts pending in the interrupt list and in GISA.
 *
 * Note that for a guest that does not enable I/O interrupts
 * but relies on TPI, a flood of classic interrupts may starve
 * out adapter interrupts on the same isc. Linux does not do
 * that, and it is possible to work around the issue by configuring
 * different iscs for classic and adapter interrupts in the guest,
 * but we may want to revisit this in the future.
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti, *tmp_inti;
	int isc;

	inti = get_top_io_int(kvm, isc_mask, schid);

	isc = get_top_gisa_isc(kvm, isc_mask, schid);
	if (isc < 0)
		/* no AI in GISA */
		goto out;

	if (!inti)
		/* AI in GISA but no classical IO int */
		goto gisa_out;

	/* both types of interrupts present */
	if (int_word_to_isc(inti->io.io_int_word) <= isc) {
		/* classical IO int with higher priority */
		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
		goto out;
	}
gisa_out:
	tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (tmp_inti) {
		tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
		tmp_inti->io.io_int_word = isc_to_int_word(isc);
		if (inti)
			kvm_s390_reinject_io_int(kvm, inti);
		inti = tmp_inti;
	} else
		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
out:
	return inti;
}
#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts after another without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * of servc requests
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}
static int __inject_virtio(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
static int __inject_pfault_done(struct kvm *kvm,
				 struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}
static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	isc = int_word_to_isc(inti->io.io_int_word);

	if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) {
		VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
		kfree(inti);
		return 0;
	}

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	if (inti->type & KVM_S390_INT_IO_AI_MASK)
		VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
	else
		VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
			inti->io.subchannel_id >> 8,
			inti->io.subchannel_id >> 1 & 0x3,
			inti->io.subchannel_nr);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = fi->next_rr_cpu;
			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	switch (type) {
	case KVM_S390_MCHK:
		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
		break;
	default:
		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
		break;
	}
	kvm_s390_vcpu_wakeup(dst_vcpu);
}
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	u64 type = READ_ONCE(inti->type);
	int rc;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	__floating_irq_kick(kvm, type);
	return 0;
}
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}
static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}
static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk = inti->mchk;
		break;
	}
}
void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
	kvm_s390_gisa_clear(kvm);
}
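
/*
 * Copy all currently pending floating interrupts (including those only
 * recorded in the GISA) into a userspace buffer; returns -ENOMEM if the
 * buffer is too small so userspace can retry with a bigger one.
 */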
static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	if (kvm->arch.gisa &&
	    kvm_s390_gisa_get_ipm(kvm->arch.gisa)) {
		for (i = 0; i <= MAX_ISC; i++) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out_nolock;
			}
			if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) {
				irq = (struct kvm_s390_irq *) &buf[n];
				irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
				irq->u.io.io_int_word = isc_to_int_word(i);
				n++;
			}
		}
	}
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}

out:
	spin_unlock(&fi->lock);
out_nolock:
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ? ret : n;
}
static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_all ais;

	if (attr->attr < sizeof(ais))
		return -EINVAL;

	if (!test_kvm_facility(kvm, 72))
		return -ENOTSUPP;

	mutex_lock(&fi->ais_lock);
	ais.simm = fi->simm;
	ais.nimm = fi->nimm;
	mutex_unlock(&fi->ais_lock);

	if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
		return -EFAULT;

	return 0;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
					  attr->attr);
		break;
	case KVM_DEV_FLIC_AISM_ALL:
		r = flic_ais_mode_get_all(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

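/*
 * Copy the type-dependent payload of one kvm_s390_irq from userspace
 * into a kernel kvm_s390_interrupt_info, selecting the union member
 * that matches the interrupt type.
 */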
static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

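/*
 * Register a new I/O adapter from the KVM_DEV_FLIC_ADAPTER_REGISTER
 * attribute. The adapter id doubles as the index into the per-VM
 * adapter array, so a given id can only be registered once.
 */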
static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	adapter->suppressible = (adapter_info.flags) &
				KVM_S390_ADAPTER_SUPPRESSIBLE;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

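/*
 * Map a guest indicator page for an adapter: translate the guest
 * address, pin the backing page with get_user_pages_fast() and track
 * it on the adapter's map list (bounded by MAX_S390_ADAPTER_MAPS).
 */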
static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

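/*
 * Dispatch a KVM_DEV_FLIC_ADAPTER_MODIFY request to the mask, map or
 * unmap operation for the addressed adapter.
 */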
static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
	u32 schid;

	if (attr->attr != sizeof(schid))
		return -EINVAL;
	if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
		return -EFAULT;
	if (!schid)
		return -EINVAL;
	kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
	/*
	 * If userspace is conforming to the architecture, we can have at most
	 * one pending I/O interrupt per subchannel, so this is effectively a
	 * clear all.
	 */
	return 0;
}

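/*
 * Switch the adapter-interruption-suppression mode of a single ISC
 * between KVM_S390_AIS_MODE_ALL and KVM_S390_AIS_MODE_SINGLE. Only
 * available when facility 72 (AIS) is provided to the guest.
 */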
static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_req req;
	int ret = 0;

	if (!test_kvm_facility(kvm, 72))
		return -ENOTSUPP;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	if (req.isc > MAX_ISC)
		return -EINVAL;

	trace_kvm_s390_modify_ais_mode(req.isc,
				       (fi->simm & AIS_MODE_MASK(req.isc)) ?
				       (fi->nimm & AIS_MODE_MASK(req.isc)) ?
				       2 : KVM_S390_AIS_MODE_SINGLE :
				       KVM_S390_AIS_MODE_ALL, req.mode);

	mutex_lock(&fi->ais_lock);
	switch (req.mode) {
	case KVM_S390_AIS_MODE_ALL:
		fi->simm &= ~AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	case KVM_S390_AIS_MODE_SINGLE:
		fi->simm |= AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&fi->ais_lock);

	return ret;
}

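/*
 * Inject an adapter interrupt for the given adapter. For suppressible
 * adapters the AIS masks are honoured: a set NIMM bit suppresses the
 * injection, and in single-interruption mode a successful injection
 * sets the NIMM bit so further interrupts for that ISC are suppressed
 * until userspace re-arms the ISC.
 */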
static int kvm_s390_inject_airq(struct kvm *kvm,
				struct s390_io_adapter *adapter)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_IO(1, 0, 0, 0),
		.parm = 0,
		.parm64 = isc_to_int_word(adapter->isc),
	};
	int ret = 0;

	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
		return kvm_s390_inject_vm(kvm, &s390int);

	mutex_lock(&fi->ais_lock);
	if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
		trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
		goto out;
	}

	ret = kvm_s390_inject_vm(kvm, &s390int);
	if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
		fi->nimm |= AIS_MODE_MASK(adapter->isc);
		trace_kvm_s390_modify_ais_mode(adapter->isc,
					       KVM_S390_AIS_MODE_SINGLE, 2);
	}
out:
	mutex_unlock(&fi->ais_lock);
	return ret;
}

static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	unsigned int id = attr->attr;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter)
		return -EINVAL;

	return kvm_s390_inject_airq(kvm, adapter);
}

static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_all ais;

	if (!test_kvm_facility(kvm, 72))
		return -ENOTSUPP;

	if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
		return -EFAULT;

	mutex_lock(&fi->ais_lock);
	fi->simm = ais.simm;
	fi->nimm = ais.nimm;
	mutex_unlock(&fi->ais_lock);

	return 0;
}

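/*
 * Set-attribute dispatcher of the FLIC device: each attribute group
 * maps to one of the enqueue/clear/adapter/AIS operations above.
 */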
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		r = clear_io_irq(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AISM:
		r = modify_ais_mode(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AIRQ_INJECT:
		r = flic_inject_airq(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AISM_ALL:
		r = flic_ais_mode_set_all(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_has_attr(struct kvm_device *dev,
			 struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
	case KVM_DEV_FLIC_ENQUEUE:
	case KVM_DEV_FLIC_CLEAR_IRQS:
	case KVM_DEV_FLIC_APF_ENABLE:
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
	case KVM_DEV_FLIC_AISM:
	case KVM_DEV_FLIC_AIRQ_INJECT:
	case KVM_DEV_FLIC_AISM_ALL:
		return 0;
	}
	return -ENXIO;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.has_attr = flic_has_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};

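/*
 * Convert a byte address and bit number within a pinned indicator page
 * into a bit index for the bitops below; when the adapter requests
 * swapped indicators, the bit numbering within each long is reversed.
 */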
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

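/*
 * Set the local indicator bit and the summary indicator bit for an
 * adapter interrupt. Returns 1 if the summary bit was newly set (an
 * interrupt should be injected), 0 if it was already set (the
 * interrupt is coalesced), or -1 if an indicator page is not mapped.
 */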
static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * Returns:
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		ret = kvm_s390_inject_airq(kvm, adapter);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

/*
 * Inject the machine check to the guest.
 */
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info)
{
	struct kvm_s390_interrupt_info inti;
	struct kvm_s390_irq irq;
	struct kvm_s390_mchk_info *mchk;
	union mci mci;
	__u64 cr14 = 0; /* upper bits are not used */
	int rc;

	mci.val = mcck_info->mcic;
	if (mci.sr)
		cr14 |= CR14_RECOVERY_SUBMASK;
	if (mci.dg)
		cr14 |= CR14_DEGRADATION_SUBMASK;
	if (mci.w)
		cr14 |= CR14_WARNING_SUBMASK;

	mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
	mchk->cr14 = cr14;
	mchk->mcic = mcck_info->mcic;
	mchk->ext_damage_code = mcck_info->ext_damage_code;
	mchk->failing_storage_address = mcck_info->failing_storage_address;
	if (mci.ck) {
		/* Inject the floating machine check */
		inti.type = KVM_S390_MCHK;
		rc = __inject_vm(vcpu->kvm, &inti);
	} else {
		/* Inject the machine check to specified vcpu */
		irq.type = KVM_S390_MCHK;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
	}
	WARN_ON_ONCE(rc);
}

int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}

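/*
 * Restore the local interrupt state of a vcpu from a userspace buffer
 * of kvm_s390_irq entries (KVM_S390_SET_IRQ_STATE). Refused with
 * -EBUSY while interrupts are already pending on the vcpu.
 */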
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}

static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}

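/*
 * Store the local interrupt state of a vcpu into a userspace buffer.
 * Pending emergency signals and a SIGP external call tracked in the
 * SCA are expanded into individual kvm_s390_irq entries. Returns the
 * number of bytes written, or a negative error code (-ENOBUFS if the
 * buffer is too small).
 */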
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}

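/*
 * Guest interruption state area (GISA) handling. When the
 * adapter-interruption-virtualization facility (AIV) is available,
 * pending I/O interruption subclasses are kept in the GISA's
 * interruption pending mask rather than on the floating interrupt
 * lists.
 */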
void kvm_s390_gisa_clear(struct kvm *kvm)
{
	if (kvm->arch.gisa) {
		memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa));
		kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa;
		VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa);
	}
}

void kvm_s390_gisa_init(struct kvm *kvm)
{
	if (css_general_characteristics.aiv) {
		kvm->arch.gisa = &kvm->arch.sie_page2->gisa;
		VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa);
		kvm_s390_gisa_clear(kvm);
	}
}

void kvm_s390_gisa_destroy(struct kvm *kvm)
{
	if (!kvm->arch.gisa)
		return;
	kvm->arch.gisa = NULL;
}