/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

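/*
 * The IOINT_* masks above decompose the interrupt type value of an I/O
 * interrupt (see the KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX handling
 * in kvm_s390_inject_vm() below): subchannel id, subsystem id, channel
 * subsystem id and the adapter-interrupt flag. A type with IOINT_AI_MASK
 * set denotes an adapter interrupt rather than an interrupt for a
 * specific subchannel.
 */
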
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ckc <
	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		return 0;
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	/* the CPU timer has expired once its sign bit is set */
	return (vcpu->arch.sie_block->cputm >> 63) &&
	       cpu_timer_interrupts_enabled(vcpu);
}

static inline int is_ioirq(unsigned long irq_type)
{
	return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
		(irq_type <= IRQ_PEND_IO_ISC_7));
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}

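/*
 * Worked example for the two ISC helpers above: the I/O interruption
 * word keeps the interruption subclass in the 0x38000000 bit field, so
 * int_word_to_isc(0x08000000) yields ISC 1. In the other direction,
 * isc_to_isc_bits() produces the per-ISC mask bit used against the
 * CR6/CR14 style registers: isc_to_isc_bits(0) is 0x80000000 and
 * isc_to_isc_bits(7) is 0x01000000.
 */
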
static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs;
}

static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.local_int.pending_irqs;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));

	return active_mask;
}

static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_local_irqs(vcpu);
	active_mask |= pending_floating_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	if (!(vcpu->arch.sie_block->gcr[14] &
	      vcpu->kvm->arch.float_int.mchk.cr14))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}

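/*
 * Note the pairing in deliverable_irqs(): each control-register test
 * gates exactly one interrupt class, e.g. CR0 bit 0x2000ul the external
 * call and 0x800ul the clock comparator, while CR6 selects the enabled
 * I/O ISCs and CR14 the repressible machine-check subclasses. A bit
 * that survives in active_mask is both pending and enabled, i.e.
 * deliverable right now.
 */
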
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}

static u16 get_ilc(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		return insn_length(vcpu->arch.sie_block->ipa >> 8);
	case ICPT_PROGI:
		return vcpu->arch.sie_block->pgmilc;
	default:
		return 0;
	}
}

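/*
 * The ILC returned above is the instruction length in bytes (2, 4 or 6).
 * For instruction-related intercepts it is derived from the opcode byte
 * saved in ipa; for program interruption intercepts the hardware has
 * already stored it in pgmilc.
 */
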
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

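/*
 * The external interrupt delivery routines below all follow the same
 * pattern as __deliver_cpu_timer(): store the external interrupt code
 * and cpu address into the guest lowcore, save the current PSW as the
 * external old PSW, then load the external new PSW from the lowcore.
 * Any failed lowcore access turns into -EFAULT for the caller.
 */
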
static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
		   0, ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	unsigned long adtl_status_addr;
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);

		rc  = kvm_s390_vcpu_store_status(vcpu,
						 KVM_S390_STORE_STATUS_PREFIXED);
		rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
				    &adtl_status_addr,
				    sizeof(unsigned long));
		rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
						      adtl_status_addr);
		rc |= put_guest_lc(vcpu, mchk.mcic,
				   (u64 __user *) __LC_MCCK_CODE);
		rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
				   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
		rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
				     &mchk.fixed_logout,
				     sizeof(mchk.fixed_logout));
		rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct _lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int cpu_addr;
	int rc;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilc = get_ilc(vcpu);

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
		   pgm_info.code, ilc);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
		kvm_s390_rewind_psw(vcpu, ilc);

	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

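/*
 * Nullifying exceptions leave the instruction address unchanged, so if
 * the program check is delivered while we intercepted the instruction
 * itself (ICPT_INST), __deliver_prog() first rewinds the PSW by the
 * instruction length; the guest then re-executes the instruction once
 * the condition has been handled.
 */
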
static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
				inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
				inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
				   (u16 *)__LC_SUBCHANNEL_ID);
		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
				   (u16 *)__LC_SUBCHANNEL_NR);
		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
				   (u32 *)__LC_IO_INT_PARM);
		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
				   (u32 *)__LC_IO_INT_WORD);
		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		kfree(inti);
	}

	return rc ? -EFAULT : 0;
}

typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
	[IRQ_PEND_EXT_SERVICE]    = __deliver_service,
	[IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
	[IRQ_PEND_VIRTIO]         = __deliver_virtio,
};

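/*
 * There are intentionally no entries for IRQ_PEND_IO_ISC_0..7 in the
 * table above: I/O interrupts carry their ISC in the pending-bit index
 * and are routed to __deliver_io() directly by
 * kvm_s390_deliver_pending_interrupts() via is_ioirq().
 */
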
/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return (sigp_ctrl & SIGP_CTRL_C) &&
	       (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	int rc;

	rc = !!deliverable_irqs(vcpu);

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = 1;

	/* external call pending and deliverable */
	if (!rc && kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		rc = 1;

	if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		rc = 1;

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/* underflow */
	if (vcpu->arch.sie_block->ckc < now)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

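/*
 * A sketch of the timeout math above: "now" is the guest view of the
 * TOD clock (host TOD plus the per-vcpu epoch), and the hrtimer is
 * armed for the clock-comparator delta converted to nanoseconds via
 * tod_to_ns(). If the comparator already lies in the past, we return
 * immediately instead of blocking.
 */
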
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	if (waitqueue_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 now, sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (vcpu->arch.sie_block->ckc > now &&
	    hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
}

int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	deliver_irq_t func;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the order of interrupt priority */
		irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
		if (is_ioirq(irq_type)) {
			rc = __deliver_io(vcpu, irq_type);
		} else {
			func = deliver_irq_funcs[irq_type];
			if (!func) {
				WARN_ON_ONCE(func == NULL);
				clear_bit(irq_type, &li->pending_irqs);
				continue;
			}
			rc = func(vcpu);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	li->irq.pgm = irq->u.pgm;
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq irq;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
				   0, 1);
	spin_lock(&li->lock);
	irq.u.pgm.code = code;
	__inject_prog(vcpu, &irq);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return 0;
}

int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq irq;
	int rc;

	VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
		   pgm_info->code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   pgm_info->code, 0, 1);
	spin_lock(&li->lock);
	irq.u.pgm = *pgm_info;
	rc = __inject_prog(vcpu, &irq);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return rc;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
		   irq->u.ext.ext_params, irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2, 2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
{
	unsigned char new_val, old_val;
	uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

	new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
	old_val = *sigp_ctrl & ~SIGP_CTRL_C;
	if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
		/* another external call is pending */
		return -EBUSY;
	}
	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	return 0;
}

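/*
 * The cmpxchg() above only succeeds when no external call is currently
 * marked pending in the SCA entry (the old value has SIGP_CTRL_C
 * clear), so at most one source CPU can win; every other injector
 * observes a changed value and gets -EBUSY, matching the architected
 * rule that only one external call may be pending per CPU.
 */
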
static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0, 2);

	/* sending vcpu invalid */
	if (src_id >= KVM_MAX_VCPUS ||
	    kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif)
		return __inject_extcall_sigpif(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0, 2);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0, 2);

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic, 2);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check.
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0, 2);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0, 2);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  u64 isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}

/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}

#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts after another without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * of servc requests
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_virtio(struct kvm *kvm,
			   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int __inject_pfault_done(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
			       struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	isc = int_word_to_isc(inti->io.io_int_word);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = fi->next_rr_cpu;
			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (type) {
	case KVM_S390_MCHK:
		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(dst_vcpu);
}

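/*
 * Destination selection in __floating_irq_kick() prefers an idle
 * (waiting) VCPU, because it can take the interrupt immediately, and
 * only then falls back to a round-robin walk over the online VCPUs,
 * skipping stopped ones. The nr_tries bound keeps the loop finite when
 * every VCPU is stopped.
 */
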
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	u64 type = READ_ONCE(inti->type);
	int rc;

	fi = &kvm->arch.float_int;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	__floating_irq_kick(kvm, type);
	return 0;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (inti->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}

int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}

static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   irq->u.pgm.code);
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}

static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
}

static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}

out:
	spin_unlock(&fi->lock);
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ? ret : n;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

*inti
,
1725 struct kvm_s390_irq __user
*uptr
= (struct kvm_s390_irq __user
*) addr
;
1726 void *target
= NULL
;
1727 void __user
*source
;
1730 if (get_user(inti
->type
, (u64 __user
*)addr
))
1733 switch (inti
->type
) {
1734 case KVM_S390_INT_PFAULT_INIT
:
1735 case KVM_S390_INT_PFAULT_DONE
:
1736 case KVM_S390_INT_VIRTIO
:
1737 case KVM_S390_INT_SERVICE
:
1738 target
= (void *) &inti
->ext
;
1739 source
= &uptr
->u
.ext
;
1740 size
= sizeof(inti
->ext
);
1742 case KVM_S390_INT_IO_MIN
...KVM_S390_INT_IO_MAX
:
1743 target
= (void *) &inti
->io
;
1744 source
= &uptr
->u
.io
;
1745 size
= sizeof(inti
->io
);
1748 target
= (void *) &inti
->mchk
;
1749 source
= &uptr
->u
.mchk
;
1750 size
= sizeof(inti
->mchk
);
1756 if (copy_from_user(target
, source
, size
))
static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}

		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};

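/*
 * Userspace instantiates the flic via KVM_CREATE_DEVICE with type
 * KVM_DEV_TYPE_FLIC and then drives it through the KVM_DEV_FLIC_*
 * attribute groups handled by flic_get_attr()/flic_set_attr() above
 * (see Documentation/virtual/kvm/devices/s390_flic.txt).
 */
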
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

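/*
 * Example for get_ind_bit(): with swap the bit number is XORed with
 * BITS_PER_LONG - 1, converting between the guest's MSB-first bit
 * numbering within a 64-bit word and the LSB-first numbering used by
 * set_bit()/test_and_set_bit(); e.g. for addr % PAGE_SIZE == 0 and
 * bit_nr == 0 the swapped result is bit 63 of the first word.
 */
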
static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}

int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}

static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}

int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if ((sigp_ctrl & SIGP_CTRL_C) &&
	    (atomic_read(&vcpu->arch.sie_block->cpuflags) &
	     CPUSTAT_ECALL_PEND)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}