/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

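/*
 * For floating I/O interrupts the classic interruption parameters are
 * packed into the interrupt type word: the IOINT_* masks above extract
 * the subchannel id, subchannel set id and channel subsystem id from it.
 */
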
/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
		return 0;

	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}

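/*
 * Record an external call in the SIGP control of the target VCPU's SCA
 * entry. The cmpxchg only succeeds if no other external call is already
 * pending there, mirroring the architecture's "busy" condition.
 */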
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	return 0;
}

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc, expect;

	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
		return 0;
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static inline int is_ioirq(unsigned long irq_type)
{
	return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
		(irq_type <= IRQ_PEND_IO_ISC_7));
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs |
	       vcpu->arch.local_int.pending_irqs;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));

	return active_mask;
}

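/*
 * Compute the subset of pending interrupts that is currently deliverable:
 * start from all pending bits and drop classes that are masked off in the
 * guest PSW or in the relevant control registers (cr0, cr6, cr14).
 */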
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	if (!(vcpu->arch.sie_block->gcr[14] &
	      vcpu->kvm->arch.float_int.mchk.cr14))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		      &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}

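/*
 * The __deliver_* functions below all follow the same pattern: store the
 * interruption code (and any parameters) and the old PSW into the guest
 * lowcore, then load the new PSW for that interruption class. Any failed
 * guest access is reported as -EFAULT.
 */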
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	unsigned long adtl_status_addr;
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);

		rc  = kvm_s390_vcpu_store_status(vcpu,
						 KVM_S390_STORE_STATUS_PREFIXED);
		rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
				    &adtl_status_addr,
				    sizeof(unsigned long));
		rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
						      adtl_status_addr);
		rc |= put_guest_lc(vcpu, mchk.mcic,
				   (u64 __user *) __LC_MCCK_CODE);
		rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
				   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
		rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
				     &mchk.fixed_logout,
				     sizeof(mchk.fixed_logout));
		rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int cpu_addr;
	int rc;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* FALL THROUGH */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				inti->ext.ext_params,
				inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
				inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
				inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
				   (u16 *)__LC_SUBCHANNEL_ID);
		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
				   (u16 *)__LC_SUBCHANNEL_NR);
		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
				   (u32 *)__LC_IO_INT_PARM);
		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
				   (u32 *)__LC_IO_INT_WORD);
		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		kfree(inti);
	}

	return rc ? -EFAULT : 0;
}

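/*
 * Delivery functions, indexed by the IRQ_PEND_* bit number. I/O interrupts
 * are not in this table; they are dispatched separately via __deliver_io().
 */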
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
	[IRQ_PEND_EXT_SERVICE]    = __deliver_service,
	[IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
	[IRQ_PEND_VIRTIO]         = __deliver_virtio,
};

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
	u64 now, cputm, sltime = 0;

	if (ckc_interrupts_enabled(vcpu)) {
		now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
		sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
		/* already expired or overflow? */
		if (!sltime || vcpu->arch.sie_block->ckc <= now)
			return 0;
		if (cpu_timer_interrupts_enabled(vcpu)) {
			cputm = kvm_s390_get_cpu_timer(vcpu);
			/* already expired? */
			if (cputm >> 63)
				return 0;
			return min(sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);
		/* already expired? */
		if (sltime >> 63)
			return 0;
	}
	return sltime;
}

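/*
 * __calculate_sltime() above returns the number of nanoseconds until the
 * next clock comparator or cpu timer wakeup, or 0 if a wakeup condition
 * has already expired; this bounds the hrtimer used for enabled wait.
 */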
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu) &&
	    !cpu_timer_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	sltime = __calculate_sltime(vcpu);
	if (!sltime)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	if (swait_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		swake_up(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	sltime = __calculate_sltime(vcpu);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	sca_clear_ext_call(vcpu);
}

int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	deliver_irq_t func;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the order of interrupt priority */
		irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
		if (is_ioirq(irq_type)) {
			rc = __deliver_io(vcpu, irq_type);
		} else {
			func = deliver_irq_funcs[irq_type];
			if (!func) {
				WARN_ON_ONCE(func == NULL);
				clear_bit(irq_type, &li->pending_irqs);
				continue;
			}
			rc = func(vcpu);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}

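/*
 * The __inject_* helpers below only record an interrupt as pending in the
 * (local or floating) interrupt structures and set the matching cpuflags;
 * the actual delivery happens in kvm_s390_deliver_pending_interrupts().
 */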
static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
		/* auto detection if no valid ILC was given */
		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
	}

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		li->irq.pgm = irq->u.pgm;
	}
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif)
		return sca_inject_ext_call(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  u64 isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}

/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}

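/*
 * SCCB_MASK covers the (8-byte aligned) SCCB address bits of the external
 * interruption parameter; SCCB_EVENT_PENDING flags a pending event
 * notification.
 */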
#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts after another without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * of servc requests
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_virtio(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int __inject_pfault_done(struct kvm *kvm,
				 struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	isc = int_word_to_isc(inti->io.io_int_word);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = fi->next_rr_cpu;
			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (type) {
	case KVM_S390_MCHK:
		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(dst_vcpu);
}

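/*
 * __inject_vm() dispatches a floating interrupt to the matching queue and
 * then kicks one VCPU (an idle one if possible, otherwise round robin) so
 * the interrupt gets noticed.
 */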
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	u64 type = READ_ONCE(inti->type);
	int rc;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	__floating_irq_kick(kvm, type);
	return 0;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}

int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}

static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}

static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
}

static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}

out:
	spin_unlock(&fi->lock);
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ? ret : n;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};

static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}

int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}

static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}

int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}