/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
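/*
 * These subcodes are presented to the guest in the "cpu address" field
 * of an EXT_IRQ_CP_SERVICE external interrupt so that it can tell
 * pfault init/done and virtio notifications apart; see the __deliver_*
 * helpers below, which store them at __LC_EXT_CPU_ADDR.
 */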
/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
		return 0;

	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}
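/*
 * Inject an external call by marking it in the target VCPU's (b/e)sca
 * sigp control entry. The cmpxchg below succeeds only if no external
 * call was pending yet (the old value had the control bit clear); on a
 * mismatch the caller sees -EBUSY, matching the SIGP busy condition.
 */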
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	return 0;
}

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc, expect;

	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect); /* cannot clear? */
}
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
		return 0;
	return ckc_interrupts_enabled(vcpu);
}
static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}
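/*
 * The guest CPU timer is a signed 64-bit quantity that counts down and
 * becomes pending once it turns negative, so shifting bit 63 down
 * yields 1 exactly when the timer has expired.
 */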
static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static inline int is_ioirq(unsigned long irq_type)
{
	return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
		(irq_type <= IRQ_PEND_IO_ISC_7));
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs |
	       vcpu->arch.local_int.pending_irqs;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));

	return active_mask;
}

static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	if (!(vcpu->arch.sie_block->gcr[14] &
	      vcpu->kvm->arch.float_int.mchk.cr14))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		      &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}
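/*
 * The __deliver_* helpers below perform the architected interruption
 * sequence in software on the guest lowcore: store the interruption
 * parameters and the current guest PSW into the respective old-PSW
 * slot, then load the guest PSW from the matching new-PSW slot. Any
 * failing guest access is reported as -EFAULT.
 */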
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	unsigned long adtl_status_addr;
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);

		rc  = kvm_s390_vcpu_store_status(vcpu,
						 KVM_S390_STORE_STATUS_PREFIXED);
		rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
				    &adtl_status_addr,
				    sizeof(unsigned long));
		rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
						      adtl_status_addr);
		rc |= put_guest_lc(vcpu, mchk.mcic,
				   (u64 __user *) __LC_MCCK_CODE);
		rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
				   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
		rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
				     &mchk.fixed_logout,
				     sizeof(mchk.fixed_logout));
		rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
	}
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int cpu_addr;
	int rc;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				inti->ext.ext_params,
				inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
		else
			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
				   inti->io.subchannel_id >> 8,
				   inti->io.subchannel_id >> 1 & 0x3,
				   inti->io.subchannel_nr);

		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
				inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
				inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
				   (u16 *)__LC_SUBCHANNEL_ID);
		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
				   (u16 *)__LC_SUBCHANNEL_NR);
		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
				   (u32 *)__LC_IO_INT_PARM);
		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
				   (u32 *)__LC_IO_INT_WORD);
		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		kfree(inti);
	}

	return rc ? -EFAULT : 0;
}
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
	[IRQ_PEND_EXT_SERVICE]    = __deliver_service,
	[IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
	[IRQ_PEND_VIRTIO]         = __deliver_virtio,
};
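/*
 * Note that I/O interrupts (IRQ_PEND_IO_ISC_0..7) have no entry in the
 * table above; they are dispatched separately via __deliver_io() in
 * kvm_s390_deliver_pending_interrupts().
 */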
/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
	u64 now, cputm, sltime = 0;

	if (ckc_interrupts_enabled(vcpu)) {
		now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
		sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
		/* already expired or overflow? */
		if (!sltime || vcpu->arch.sie_block->ckc <= now)
			return 0;
		if (cpu_timer_interrupts_enabled(vcpu)) {
			cputm = kvm_s390_get_cpu_timer(vcpu);
			/* already expired? */
			if (cputm >> 63)
				return 0;
			return min(sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);
		/* already expired? */
		if (sltime >> 63)
			return 0;
	}
	return sltime;
}
936 /* already expired? */
943 int kvm_s390_handle_wait(struct kvm_vcpu
*vcpu
)
947 vcpu
->stat
.exit_wait_state
++;
950 if (kvm_arch_vcpu_runnable(vcpu
))
953 if (psw_interrupts_disabled(vcpu
)) {
954 VCPU_EVENT(vcpu
, 3, "%s", "disabled wait");
955 return -EOPNOTSUPP
; /* disabled wait */
958 if (!ckc_interrupts_enabled(vcpu
) &&
959 !cpu_timer_interrupts_enabled(vcpu
)) {
960 VCPU_EVENT(vcpu
, 3, "%s", "enabled wait w/o timer");
961 __set_cpu_idle(vcpu
);
965 sltime
= __calculate_sltime(vcpu
);
969 __set_cpu_idle(vcpu
);
970 hrtimer_start(&vcpu
->arch
.ckc_timer
, ktime_set (0, sltime
) , HRTIMER_MODE_REL
);
971 VCPU_EVENT(vcpu
, 4, "enabled wait: %llu ns", sltime
);
973 srcu_read_unlock(&vcpu
->kvm
->srcu
, vcpu
->srcu_idx
);
974 kvm_vcpu_block(vcpu
);
975 __unset_cpu_idle(vcpu
);
976 vcpu
->srcu_idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
978 hrtimer_cancel(&vcpu
->arch
.ckc_timer
);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	/*
	 * We cannot move this into the if, as the CPU might be already
	 * in kvm_vcpu_block without having the waitqueue set (polling)
	 */
	vcpu->valid_wakeup = true;
	if (swait_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		swake_up(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
	/*
	 * The VCPU might not be sleeping but is executing the VSIE. Let's
	 * kick it, so it leaves the SIE to process the request.
	 */
	kvm_s390_vsie_kick(vcpu);
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	sltime = __calculate_sltime(vcpu);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	sca_clear_ext_call(vcpu);
}
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	deliver_irq_t func;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the order of interrupt priority */
		irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
		if (is_ioirq(irq_type)) {
			rc = __deliver_io(vcpu, irq_type);
		} else {
			func = deliver_irq_funcs[irq_type];
			if (!func) {
				WARN_ON_ONCE(func == NULL);
				clear_bit(irq_type, &li->pending_irqs);
				continue;
			}
			rc = func(vcpu);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}
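/*
 * The __inject_* helpers below only record an interrupt as pending in
 * the per-VCPU kvm_s390_local_interrupt struct and set the matching
 * IRQ_PEND_* bit (plus a cpuflag to kick the SIE where needed); the
 * actual delivery to the guest happens later in
 * kvm_s390_deliver_pending_interrupts().
 */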
static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
		/* auto detection if no valid ILC was given */
		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
	}

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		li->irq.pgm = irq->u.pgm;
	}
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}
static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif)
		return sca_inject_ext_call(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}
static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  int isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}

/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}
#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts, one after another, without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * of servc requests
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_virtio(struct kvm *kvm,
			   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int __inject_pfault_done(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
			       struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	if (inti->type & KVM_S390_INT_IO_AI_MASK)
		VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
	else
		VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
			 inti->io.subchannel_id >> 8,
			 inti->io.subchannel_id >> 1 & 0x3,
			 inti->io.subchannel_nr);
	isc = int_word_to_isc(inti->io.io_int_word);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = fi->next_rr_cpu;
			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (type) {
	case KVM_S390_MCHK:
		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(dst_vcpu);
}

static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	u64 type = READ_ONCE(inti->type);
	int rc;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	__floating_irq_kick(kvm, type);
	return 0;
}
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}

int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}
static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}
static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
}
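/*
 * Everything from here on implements the FLIC ("floating interrupt
 * controller") device: userspace creates it with KVM_CREATE_DEVICE and
 * drives it through the KVM_DEV_FLIC_* device attributes handled in
 * flic_get_attr()/flic_set_attr() below.
 */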
static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}

out:
	spin_unlock(&fi->lock);
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ? ret : n;
}
static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}
static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}
static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}
static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
	u32 schid;

	if (attr->attr != sizeof(schid))
		return -EINVAL;
	if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
		return -EFAULT;
	kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
	/*
	 * If userspace is conforming to the architecture, we can have at most
	 * one pending I/O interrupt per subchannel, so this is effectively a
	 * clear all.
	 */
	return 0;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		r = clear_io_irq(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}
static int flic_has_attr(struct kvm_device *dev,
			 struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
	case KVM_DEV_FLIC_ENQUEUE:
	case KVM_DEV_FLIC_CLEAR_IRQS:
	case KVM_DEV_FLIC_APF_ENABLE:
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		return 0;
	}
	return -ENXIO;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.has_attr = flic_has_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};
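/*
 * The helpers below implement adapter interrupts: set_adapter_int()
 * flips the adapter's indicator bit and its summary bit in guest pages
 * that were pinned via KVM_S390_IO_ADAPTER_MAP, and only injects a
 * fresh floating I/O interrupt when the summary bit was not already
 * set, i.e. when the interrupt would not be coalesced anyway.
 */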
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}
/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}

static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}

int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}