/*
 * interrupt.c - handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}
static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
                return 0;
        return 1;
}
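/*
 * A pending interrupt is deliverable only if the external-interrupt
 * PSW mask bit and the matching subclass-mask bit in control register
 * 0 are both set: per the s390 architecture, 0x4000ul is the
 * emergency-signal subclass mask and 0x200ul the service-signal
 * subclass mask, which KVM also uses for virtio notifications.
 */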
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_SERVICE:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_RESTART:
                return 1;
        default:
                BUG();
        }
        return 0;
}
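/*
 * Idle bookkeeping: CPUSTAT_WAIT marks the wait state in the SIE
 * control block, while idle_mask in the floating interrupt structure
 * lets kvm_s390_inject_vm() pick an idle VCPU as the target for a
 * floating interrupt.
 */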
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_ECALL_PEND |
                CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
}
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR0;
                break;
        case KVM_S390_SIGP_STOP:
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
                break;
        default:
                BUG();
        }
}
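/*
 * Deliver one interrupt into the guest lowcore by hand: store the
 * interrupt code and parameters, save the current PSW as the old PSW
 * and load the new PSW of the corresponding interrupt class. The
 * table maps the two instruction-length bits of the intercepted
 * instruction (ipa >> 14) to an ILC of 2, 4, 4 or 6 bytes.
 */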
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_interrupt_info *inti)
{
        const unsigned short table[] = { 2, 4, 4, 6 };
        int rc, exception = 0;

        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
                vcpu->stat.deliver_emergency_signal++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_SERVICE:
                VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                           inti->ext.ext_params);
                vcpu->stat.deliver_service_signal++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_VIRTIO:
                VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
                                   inti->ext.ext_params2);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_SIGP_STOP:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
                vcpu->stat.deliver_stop_signal++;
                __set_intercept_indicator(vcpu, inti);
                break;

        case KVM_S390_SIGP_SET_PREFIX:
                VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
                           inti->prefix.address);
                vcpu->stat.deliver_prefix_signal++;
                vcpu->arch.sie_block->prefix = inti->prefix.address;
                vcpu->arch.sie_block->ihcpu = 0xffff;
                break;

        case KVM_S390_RESTART:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
                vcpu->stat.deliver_restart_signal++;
                rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
                        restart_old_psw), &vcpu->arch.sie_block->gpsw,
                        sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_PROGRAM_INT:
                VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                           inti->pgm.code,
                           table[vcpu->arch.sie_block->ipa >> 14]);
                vcpu->stat.deliver_program_int++;
                rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_PGM_ILC,
                        table[vcpu->arch.sie_block->ipa >> 14]);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_PGM_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        default:
                BUG();
        }
        if (exception) {
                printk("kvm: The guest lowcore is not mapped during interrupt "
                        "delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
}
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
        int rc, exception = 0;

        if (psw_extint_disabled(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
        if (exception) {
                printk("kvm: The guest lowcore is not mapped during interrupt "
                        "delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
        return 1;
}
static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        if (atomic_read(&li->active)) {
                spin_lock_bh(&li->lock);
                list_for_each_entry(inti, &li->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock_bh(&li->lock);
        }

        if ((!rc) && atomic_read(&fi->active)) {
                spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock(&fi->lock);
        }

        if ((!rc) && (vcpu->arch.sie_block->ckc <
                get_clock() + vcpu->arch.sie_block->epoch)) {
                if ((!psw_extint_disabled(vcpu)) &&
                    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
                        rc = 1;
        }

        return rc;
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
}
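/*
 * Handle a guest wait-state PSW: return immediately if an interrupt is
 * already deliverable or the clock comparator has expired, reject a
 * completely disabled wait, and otherwise sleep until an interrupt
 * arrives or the ckc hrtimer fires. The ((ckc - now) * 125) >> 9 term
 * converts TOD clock units to nanoseconds: one TOD unit is 1/4096
 * microsecond, i.e. 125/512 ns.
 */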
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 now, sltime;
        DECLARE_WAITQUEUE(wait, current);

        vcpu->stat.exit_wait_state++;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;

        __set_cpu_idle(vcpu);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 0;
        spin_unlock_bh(&vcpu->arch.local_int.lock);

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                __unset_cpu_idle(vcpu);
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (psw_extint_disabled(vcpu) ||
            (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                goto no_timer;
        }

        now = get_clock() + vcpu->arch.sie_block->epoch;
        if (vcpu->arch.sie_block->ckc < now) {
                __unset_cpu_idle(vcpu);
                return 0;
        }

        sltime = ((vcpu->arch.sie_block->ckc - now) * 125) >> 9;

        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
        spin_lock(&vcpu->arch.local_int.float_int->lock);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        add_wait_queue(&vcpu->arch.local_int.wq, &wait);
        while (list_empty(&vcpu->arch.local_int.list) &&
                list_empty(&vcpu->arch.local_int.float_int->list) &&
                (!vcpu->arch.local_int.timer_due) &&
                !signal_pending(current)) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_bh(&vcpu->arch.local_int.lock);
                spin_unlock(&vcpu->arch.local_int.float_int->lock);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
                spin_lock(&vcpu->arch.local_int.float_int->lock);
                spin_lock_bh(&vcpu->arch.local_int.lock);
        }
        __unset_cpu_idle(vcpu);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
        spin_unlock(&vcpu->arch.local_int.float_int->lock);
        hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
        return 0;
}
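/*
 * Tasklet half of the ckc timer: runs after kvm_s390_idle_wakeup()
 * below and marks the timeout via timer_due, which the wait loop in
 * kvm_s390_handle_wait() re-checks under the local interrupt lock.
 */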
void kvm_s390_tasklet(unsigned long parm)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

        spin_lock(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 1;
        if (waitqueue_active(&vcpu->arch.local_int.wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
        spin_unlock(&vcpu->arch.local_int.lock);
}
/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}
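/*
 * Drain everything that is deliverable right now: local interrupts
 * first, then a possible clock-comparator interrupt, then floating
 * interrupts. Interrupts that are not yet deliverable only raise
 * their intercept indicators so the SIE exits once the guest enables
 * them.
 */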
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info *n, *inti = NULL;
        int deliver;

        __reset_intercept_indicators(vcpu);
        if (atomic_read(&li->active)) {
                do {
                        deliver = 0;
                        spin_lock_bh(&li->lock);
                        list_for_each_entry_safe(inti, n, &li->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&li->list))
                                atomic_set(&li->active, 0);
                        spin_unlock_bh(&li->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }

        if ((vcpu->arch.sie_block->ckc <
                get_clock() + vcpu->arch.sie_block->epoch))
                __try_deliver_ckc_interrupt(vcpu);

        if (atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock(&fi->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }
}
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_PROGRAM_INT;
        inti->pgm.code = code;

        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
        spin_lock_bh(&li->lock);
        list_add(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        BUG_ON(waitqueue_active(&li->wq));
        spin_unlock_bh(&li->lock);
        return 0;
}
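/*
 * Queue a floating (VM-wide) interrupt and kick a receiving VCPU:
 * prefer an idle VCPU from idle_mask, otherwise round-robin through
 * next_rr_cpu until a VCPU with an initialized local interrupt
 * structure is found.
 */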
int kvm_s390_inject_vm(struct kvm *kvm,
                       struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti;
        int sigcpu;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_INT_VIRTIO:
                VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
                         s390int->parm, s390int->parm64);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                inti->ext.ext_params2 = s390int->parm64;
                break;
        case KVM_S390_INT_SERVICE:
                VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                break;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_INT_EMERGENCY:
        default:
                kfree(inti);
                return -EINVAL;
        }

        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        list_add_tail(&inti->list, &fi->list);
        atomic_set(&fi->active, 1);
        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
        if (sigcpu == KVM_MAX_VCPUS) {
                do {
                        sigcpu = fi->next_rr_cpu++;
                        if (sigcpu == KVM_MAX_VCPUS)
                                sigcpu = fi->next_rr_cpu = 0;
                } while (fi->local_int[sigcpu] == NULL);
        }
        li = fi->local_int[sigcpu];
        spin_lock_bh(&li->lock);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
        return 0;
}
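/*
 * Queue an interrupt on a single VCPU's local list. Program
 * interrupts are added at the head so they are delivered before
 * already-queued signals; all other types are appended in FIFO order.
 */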
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                         struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_PROGRAM_INT:
                if (s390int->parm & 0xffff0000) {
                        kfree(inti);
                        return -EINVAL;
                }
                inti->type = s390int->type;
                inti->pgm.code = s390int->parm;
                VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_SET_PREFIX:
                inti->prefix.address = s390int->parm;
                inti->type = s390int->type;
                VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_STOP:
        case KVM_S390_RESTART:
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
                inti->type = s390int->type;
                break;
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
        default:
                kfree(inti);
                return -EINVAL;
        }

        mutex_lock(&vcpu->kvm->lock);
        li = &vcpu->arch.local_int;
        spin_lock_bh(&li->lock);
        if (inti->type == KVM_S390_PROGRAM_INT)
                list_add(&inti->list, &li->list);
        else
                list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (inti->type == KVM_S390_SIGP_STOP)
                li->action_bits |= ACTION_STOP_ON_STOP;
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
        spin_unlock_bh(&li->lock);
        mutex_unlock(&vcpu->kvm->lock);
        return 0;
}