/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
		return 0;
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.sie_block->cputm >> 63) &&
	       cpu_timer_interrupts_enabled(vcpu);
}

static inline int is_ioirq(unsigned long irq_type)
{
	return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
		(irq_type <= IRQ_PEND_IO_ISC_7));
}
static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}
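/*
 * Worked example: for isc 3, isc_to_isc_bits() yields
 * (0x80 >> 3) << 24 == 0x10000000, i.e. the ISC-3 mask bit in the
 * position CR6 uses; int_word_to_isc() inverts this by extracting
 * bits 2-4 of the I/O interruption word, e.g. 0x18000000 >> 27 == 3.
 */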
static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs |
	       vcpu->arch.local_int.pending_irqs;
}
static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));

	return active_mask;
}
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	if (!(vcpu->arch.sie_block->gcr[14] &
	      vcpu->kvm->arch.float_int.mchk.cr14))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}
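/*
 * The CR0 bits tested above are the external-interrupt subclass masks:
 * 0x2000 external call, 0x4000 emergency signal, 0x800 clock comparator,
 * 0x400 CPU timer and 0x200 service signal.
 */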
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		      &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		return insn_length(vcpu->arch.sie_block->ipa >> 8);
	case ICPT_PROGI:
		return vcpu->arch.sie_block->pgmilc;
	default:
		return 0;
	}
}
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
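/*
 * All external-type delivery routines follow the same lowcore exchange:
 * store the interrupt code and cpu address, save the current guest PSW
 * as the external old PSW, and load the external new PSW so the guest
 * resumes in its interrupt handler.
 */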
static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	unsigned long adtl_status_addr;
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);

		rc  = kvm_s390_vcpu_store_status(vcpu,
						 KVM_S390_STORE_STATUS_PREFIXED);
		rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
				    &adtl_status_addr,
				    sizeof(unsigned long));
		rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
						      adtl_status_addr);
		rc |= put_guest_lc(vcpu, mchk.mcic,
				   (u64 __user *) __LC_MCCK_CODE);
		rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
				   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
		rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
				     &mchk.fixed_logout,
				     sizeof(mchk.fixed_logout));
		rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
	}
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct _lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}
static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int cpu_addr;
	int rc;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilc = get_ilc(vcpu);

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
		   pgm_info.code, ilc);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
		kvm_s390_rewind_psw(vcpu, ilc);

	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				inti->ext.ext_params,
				inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
				inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
				inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
				   (u16 *)__LC_SUBCHANNEL_ID);
		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
				   (u16 *)__LC_SUBCHANNEL_NR);
		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
				   (u32 *)__LC_IO_INT_PARM);
		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
				   (u32 *)__LC_IO_INT_WORD);
		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		kfree(inti);
	}

	return rc ? -EFAULT : 0;
}
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
	[IRQ_PEND_EXT_SERVICE]    = __deliver_service,
	[IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
	[IRQ_PEND_VIRTIO]         = __deliver_virtio,
};
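/*
 * I/O interrupts (IRQ_PEND_IO_ISC_0..7) are absent from this table on
 * purpose; they carry a payload queued per ISC and are delivered via
 * __deliver_io() in kvm_s390_deliver_pending_interrupts().
 */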
/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return (sigp_ctrl & SIGP_CTRL_C) &&
	       (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
}
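/*
 * With the SIGP interpretation facility (sclp.has_sigpif), a pending
 * external call is tracked in the SCA entry's sigp_ctrl byte rather
 * than in pending_irqs, so both places have to be consulted.
 */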
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/* underflow */
	if (vcpu->arch.sie_block->ckc < now)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}
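/*
 * The sleep time above is the guest's clock comparator minus the
 * current TOD clock, converted from TOD format to nanoseconds for the
 * hrtimer; kvm_s390_idle_wakeup() below re-checks the comparator in
 * case the timer fires early.
 */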
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	if (waitqueue_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 now, sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (vcpu->arch.sie_block->ckc > now &&
	    hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
}
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	deliver_irq_t func;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the order of interrupt priority */
		irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
		if (is_ioirq(irq_type)) {
			rc = __deliver_io(vcpu, irq_type);
		} else {
			func = deliver_irq_funcs[irq_type];
			if (unlikely(!func)) {
				WARN_ON_ONCE(func == NULL);
				clear_bit(irq_type, &li->pending_irqs);
				continue;
			}
			rc = func(vcpu);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}
static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		li->irq.pgm = irq->u.pgm;
	}
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}
static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}
static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
{
	unsigned char new_val, old_val;
	uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

	new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
	old_val = *sigp_ctrl & ~SIGP_CTRL_C;
	if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
		/* another external call is pending */
		return -EBUSY;
	}
	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	return 0;
}
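/*
 * The cmpxchg() above only succeeds if no external call was pending
 * (SIGP_CTRL_C clear), so at most one source CPU can be recorded in the
 * SCA entry at a time; a second injection fails with -EBUSY.
 */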
static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif)
		return __inject_extcall_sigpif(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}
static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}
#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	return 0;
}
static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}
static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}
static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check.
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}
static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}
static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}
static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  int isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}
/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}
#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts after another without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * of servc requests
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}
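/*
 * SCCB_MASK covers the 8-byte aligned SCCB address bits in the external
 * interrupt parameter, while SCCB_EVENT_PENDING is sticky and may be
 * or'ed into an already queued service signal.
 */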
static int __inject_virtio(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
static int __inject_pfault_done(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}
static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	isc = int_word_to_isc(inti->io.io_int_word);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = fi->next_rr_cpu;
			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (type) {
	case KVM_S390_MCHK:
		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN ... KVM_S390_INT_IO_MAX:
		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(dst_vcpu);
}
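/*
 * The CPUSTAT_* flag chosen above requests the matching SIE
 * interception for the interrupt type, so a VCPU currently running in
 * SIE drops back into the host to pick up the new interrupt;
 * kvm_s390_vcpu_wakeup() covers the sleeping case.
 */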
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	u64 type = READ_ONCE(inti->type);
	int rc;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN ... KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	__floating_irq_kick(kvm, type);
	return 0;
}
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN ... KVM_S390_INT_IO_MAX:
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);
	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}
static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN ... KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}
static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN ... KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}
void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
}
static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}

out:
	spin_unlock(&fi->lock);
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ? ret : n;
}
static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}
static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN ... KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}
static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}

		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}
*get_io_adapter(struct kvm
*kvm
, unsigned int id
)
1775 if (id
>= MAX_S390_IO_ADAPTERS
)
1777 return kvm
->arch
.adapters
[id
];
1780 static int register_io_adapter(struct kvm_device
*dev
,
1781 struct kvm_device_attr
*attr
)
1783 struct s390_io_adapter
*adapter
;
1784 struct kvm_s390_io_adapter adapter_info
;
1786 if (copy_from_user(&adapter_info
,
1787 (void __user
*)attr
->addr
, sizeof(adapter_info
)))
1790 if ((adapter_info
.id
>= MAX_S390_IO_ADAPTERS
) ||
1791 (dev
->kvm
->arch
.adapters
[adapter_info
.id
] != NULL
))
1794 adapter
= kzalloc(sizeof(*adapter
), GFP_KERNEL
);
1798 INIT_LIST_HEAD(&adapter
->maps
);
1799 init_rwsem(&adapter
->maps_lock
);
1800 atomic_set(&adapter
->nr_maps
, 0);
1801 adapter
->id
= adapter_info
.id
;
1802 adapter
->isc
= adapter_info
.isc
;
1803 adapter
->maskable
= adapter_info
.maskable
;
1804 adapter
->masked
= false;
1805 adapter
->swap
= adapter_info
.swap
;
1806 dev
->kvm
->arch
.adapters
[adapter
->id
] = adapter
;
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;

	return ret;
}
static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}
static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}
void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}
static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}
static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}
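/*
 * Worked example: with swap and BITS_PER_LONG == 64, indicator bit 0
 * maps to host bit 63 (0 ^ 63 == 63), translating the guest's MSB-first
 * bit numbering into the host's LSB-first bitops numbering.
 */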
static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}
static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}
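/*
 * Two indicators are set per adapter interrupt: the device's own bit
 * and a summary bit. The return value is 1 only when the summary bit
 * was newly set, and 0 when it was already active, i.e. the interrupt
 * is coalesced with one that is still being processed.
 */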
/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}
int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}
*li
,
2163 struct kvm_s390_irq
*irq
,
2164 unsigned long irq_type
)
2167 case IRQ_PEND_MCHK_EX
:
2168 case IRQ_PEND_MCHK_REP
:
2169 irq
->type
= KVM_S390_MCHK
;
2170 irq
->u
.mchk
= li
->irq
.mchk
;
2173 irq
->type
= KVM_S390_PROGRAM_INT
;
2174 irq
->u
.pgm
= li
->irq
.pgm
;
2176 case IRQ_PEND_PFAULT_INIT
:
2177 irq
->type
= KVM_S390_INT_PFAULT_INIT
;
2178 irq
->u
.ext
= li
->irq
.ext
;
2180 case IRQ_PEND_EXT_EXTERNAL
:
2181 irq
->type
= KVM_S390_INT_EXTERNAL_CALL
;
2182 irq
->u
.extcall
= li
->irq
.extcall
;
2184 case IRQ_PEND_EXT_CLOCK_COMP
:
2185 irq
->type
= KVM_S390_INT_CLOCK_COMP
;
2187 case IRQ_PEND_EXT_CPU_TIMER
:
2188 irq
->type
= KVM_S390_INT_CPU_TIMER
;
2190 case IRQ_PEND_SIGP_STOP
:
2191 irq
->type
= KVM_S390_SIGP_STOP
;
2192 irq
->u
.stop
= li
->irq
.stop
;
2194 case IRQ_PEND_RESTART
:
2195 irq
->type
= KVM_S390_RESTART
;
2197 case IRQ_PEND_SET_PREFIX
:
2198 irq
->type
= KVM_S390_SIGP_SET_PREFIX
;
2199 irq
->u
.prefix
= li
->irq
.prefix
;
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if ((sigp_ctrl & SIGP_CTRL_C) &&
	    (atomic_read(&vcpu->arch.sie_block->cpuflags) &
	     CPUSTAT_ECALL_PEND)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}