/*
 * interrupt.c - handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"

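/*
 * Helpers that test interrupt masking in the guest PSW: external
 * interrupts are only deliverable while PSW_MASK_EXT is set.
 */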
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
                return 0;
        return 1;
}

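/*
 * An external interrupt is deliverable only if the PSW external mask
 * is on and the matching subclass-mask bit in guest control register 0
 * is set (0x4000 = emergency signal, 0x200 = service signal).
 */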
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_SERVICE:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_RESTART:
                return 1;
        default:
                BUG();
        }
        return 0;
}

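/*
 * Track which vcpus are in enabled wait: CPUSTAT_WAIT in the SIE
 * control block plus a per-vcpu bit in the floating-interrupt
 * idle_mask, which kvm_s390_inject_vm() scans to find a sleeper.
 */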
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_ECALL_PEND |
                CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

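/*
 * For an interrupt that is not currently deliverable, arrange a
 * future exit: if the PSW external mask is off, flag CPUSTAT_EXT_INT;
 * if only the CR0 subclass mask blocks it, request interception of
 * guest changes to control register 0 via LCTL_CR0.
 */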
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR0;
                break;
        case KVM_S390_SIGP_STOP:
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
                break;
        default:
                BUG();
        }
}

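/*
 * Deliver one interrupt by emulating the architected lowcore exchange:
 * store the interruption code and parameters, save the current PSW as
 * the old PSW and load the new PSW from the guest lowcore.  The ILC
 * table maps the two instruction-length bits of the ipa field to an
 * instruction length of 2, 4 or 6 bytes.
 */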
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_interrupt_info *inti)
{
        const unsigned short table[] = { 2, 4, 4, 6 };
        int rc, exception = 0;

        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
                vcpu->stat.deliver_emergency_signal++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->emerg.code);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_SERVICE:
                VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                           inti->ext.ext_params);
                vcpu->stat.deliver_service_signal++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_VIRTIO:
                VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
                                   inti->ext.ext_params2);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_SIGP_STOP:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
                vcpu->stat.deliver_stop_signal++;
                __set_intercept_indicator(vcpu, inti);
                break;

        case KVM_S390_SIGP_SET_PREFIX:
                VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
                           inti->prefix.address);
                vcpu->stat.deliver_prefix_signal++;
                vcpu->arch.sie_block->prefix = inti->prefix.address;
                vcpu->arch.sie_block->ihcpu = 0xffff;
                break;

        case KVM_S390_RESTART:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
                vcpu->stat.deliver_restart_signal++;
                rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
                        restart_old_psw), &vcpu->arch.sie_block->gpsw,
                        sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_PROGRAM_INT:
                VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                           inti->pgm.code,
                           table[vcpu->arch.sie_block->ipa >> 14]);
                vcpu->stat.deliver_program_int++;
                rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_PGM_ILC,
                        table[vcpu->arch.sie_block->ipa >> 14]);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_PGM_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        default:
                BUG();
        }
        if (exception) {
                printk("kvm: The guest lowcore is not mapped during interrupt "
                        "delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
}

static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
        int rc, exception = 0;

        if (psw_extint_disabled(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
        if (exception) {
                printk("kvm: The guest lowcore is not mapped during interrupt "
                        "delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
        return 1;
}

static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        if (atomic_read(&li->active)) {
                spin_lock_bh(&li->lock);
                list_for_each_entry(inti, &li->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock_bh(&li->lock);
        }

        if ((!rc) && atomic_read(&fi->active)) {
                spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock(&fi->lock);
        }

        if ((!rc) && (vcpu->arch.sie_block->ckc <
                get_clock() + vcpu->arch.sie_block->epoch)) {
                if ((!psw_extint_disabled(vcpu)) &&
                    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
                        rc = 1;
        }

        return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
}

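/*
 * Put the vcpu to sleep for an enabled wait state.  If the clock
 * comparator is armed, an hrtimer wakes the vcpu when it expires; a
 * disabled wait (all interrupt classes masked) cannot make progress
 * and is pushed back to userspace as -EOPNOTSUPP.
 */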
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 now, sltime;
        DECLARE_WAITQUEUE(wait, current);

        vcpu->stat.exit_wait_state++;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;

        __set_cpu_idle(vcpu);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 0;
        spin_unlock_bh(&vcpu->arch.local_int.lock);

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                __unset_cpu_idle(vcpu);
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (psw_extint_disabled(vcpu) ||
            (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                goto no_timer;
        }

        now = get_clock() + vcpu->arch.sie_block->epoch;
        if (vcpu->arch.sie_block->ckc < now) {
                __unset_cpu_idle(vcpu);
                return 0;
        }

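        /*
         * Convert the remaining time from TOD clock units to
         * nanoseconds: one TOD unit (bit 63) is 1/4096 us, so
         * ns = units * 1000 / 4096 = (units * 125) >> 9.
         */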
        sltime = ((vcpu->arch.sie_block->ckc - now) * 125) >> 9;

        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime),
                      HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns",
                   sltime);
no_timer:
        spin_lock(&vcpu->arch.local_int.float_int->lock);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        add_wait_queue(&vcpu->arch.local_int.wq, &wait);
        while (list_empty(&vcpu->arch.local_int.list) &&
               list_empty(&vcpu->arch.local_int.float_int->list) &&
               (!vcpu->arch.local_int.timer_due) &&
               !signal_pending(current)) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_bh(&vcpu->arch.local_int.lock);
                spin_unlock(&vcpu->arch.local_int.float_int->lock);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
                spin_lock(&vcpu->arch.local_int.float_int->lock);
                spin_lock_bh(&vcpu->arch.local_int.lock);
        }
        __unset_cpu_idle(vcpu);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
        spin_unlock(&vcpu->arch.local_int.float_int->lock);
        hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

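/*
 * Tasklet half of the wakeup path: mark the timer as due and wake the
 * vcpu if it is sleeping in kvm_s390_handle_wait().
 */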
void kvm_s390_tasklet(unsigned long parm)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

        spin_lock(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 1;
        if (waitqueue_active(&vcpu->arch.local_int.wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
        spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

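/*
 * Walk the local and floating lists and deliver everything that is
 * deliverable right now; anything still blocked gets an intercept
 * indicator set so we exit SIE once it becomes deliverable.
 */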
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info *n, *inti = NULL;
        int deliver;

        __reset_intercept_indicators(vcpu);
        if (atomic_read(&li->active)) {
                do {
                        deliver = 0;
                        spin_lock_bh(&li->lock);
                        list_for_each_entry_safe(inti, n, &li->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&li->list))
                                atomic_set(&li->active, 0);
                        spin_unlock_bh(&li->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }

        if ((vcpu->arch.sie_block->ckc <
                get_clock() + vcpu->arch.sie_block->epoch))
                __try_deliver_ckc_interrupt(vcpu);

        if (atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock(&fi->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }
}

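/*
 * Inject a program check from kernel context.  Program interrupts go
 * to the head of the local list, so they are found before other
 * pending local interrupts; the vcpu must not be sleeping in
 * kvm_s390_handle_wait() here (BUG_ON below).
 */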
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_PROGRAM_INT;
        inti->pgm.code = code;

        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
        spin_lock_bh(&li->lock);
        list_add(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        BUG_ON(waitqueue_active(&li->wq));
        spin_unlock_bh(&li->lock);
        return 0;
}

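/*
 * Inject a floating interrupt for the whole VM.  The interrupt is
 * queued on the floating list and one vcpu is signalled: preferably an
 * idle one from idle_mask, otherwise the next vcpu round-robin.
 */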
int kvm_s390_inject_vm(struct kvm *kvm,
                       struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti;
        int sigcpu;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_INT_VIRTIO:
                VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
                         s390int->parm, s390int->parm64);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                inti->ext.ext_params2 = s390int->parm64;
                break;
        case KVM_S390_INT_SERVICE:
                VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                break;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_INT_EMERGENCY:
        default:
                kfree(inti);
                return -EINVAL;
        }

        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        list_add_tail(&inti->list, &fi->list);
        atomic_set(&fi->active, 1);
        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
        if (sigcpu == KVM_MAX_VCPUS) {
                do {
                        sigcpu = fi->next_rr_cpu++;
                        if (sigcpu == KVM_MAX_VCPUS)
                                sigcpu = fi->next_rr_cpu = 0;
                } while (fi->local_int[sigcpu] == NULL);
        }
        li = fi->local_int[sigcpu];
        spin_lock_bh(&li->lock);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
        return 0;
}

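/*
 * Inject an interrupt targeted at a single vcpu (userspace path).
 * Program interrupts go to the head of the local list; all other
 * types are queued at the tail.
 */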
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                         struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_PROGRAM_INT:
                if (s390int->parm & 0xffff0000) {
                        kfree(inti);
                        return -EINVAL;
                }
                inti->type = s390int->type;
                inti->pgm.code = s390int->parm;
                VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_SET_PREFIX:
                inti->prefix.address = s390int->parm;
                inti->type = s390int->type;
                VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_STOP:
        case KVM_S390_RESTART:
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
                inti->type = s390int->type;
                break;
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
        default:
                kfree(inti);
                return -EINVAL;
        }

        mutex_lock(&vcpu->kvm->lock);
        li = &vcpu->arch.local_int;
        spin_lock_bh(&li->lock);
        if (inti->type == KVM_S390_PROGRAM_INT)
                list_add(&inti->list, &li->list);
        else
                list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (inti->type == KVM_S390_SIGP_STOP)
                li->action_bits |= ACTION_STOP_ON_STOP;
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
        spin_unlock_bh(&li->lock);
        mutex_unlock(&vcpu->kvm->lock);
        return 0;
}