/*
 * interrupt.c - handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <asm/lowcore.h>
#include <asm/uaccess.h>
#include <linux/kvm_host.h>
#include <linux/signal.h>
#include "kvm-s390.h"
#include "gaccess.h"

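/*
 * Helpers that test the interruption mask in the guest PSW: external
 * interrupts are only deliverable while PSW_MASK_EXT is set, and a guest
 * that masks PER, I/O and external interrupts at the same time has entered
 * a disabled wait that cannot be left by injecting anything.
 */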
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
                return 0;
        return 1;
}

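/*
 * A pending interrupt is deliverable only if the matching subclass mask in
 * guest control register 0 is set as well: 0x4000 gates emergency signals,
 * 0x200 gates service signals (also used here for virtio notifications).
 * Program checks, SIGP stop/set-prefix and restart are always deliverable.
 */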
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_SERVICE:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_RESTART:
                return 1;
        default:
                BUG();
        }
        return 0;
}

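/*
 * Track which VCPUs sit in enabled wait: CPUSTAT_WAIT is reflected into the
 * SIE control block, and the per-VM idle_mask lets floating interrupts find
 * an idle CPU to wake (see kvm_s390_inject_vm below).
 */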
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_ECALL_PEND |
                CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

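/*
 * For an interrupt that is not deliverable right now, request another exit
 * from SIE instead: either flag the pending class in cpuflags, or ask SIE
 * to intercept loads of control register 0 (LCTL_CR0) so we get another
 * chance once the guest opens the relevant subclass mask.
 */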
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR0;
                break;
        case KVM_S390_SIGP_STOP:
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
                break;
        default:
                BUG();
        }
}

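/*
 * Deliver an interrupt by performing the architected PSW swap in the guest
 * lowcore: store the interruption code (and parameters), save the current
 * guest PSW as the old PSW, and load the new PSW from the lowcore. The
 * table[] below maps the two instruction-length bits of the intercepted
 * instruction (sie_block->ipa >> 14) to an ILC of 2, 4 or 6 bytes.
 */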
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_interrupt_info *inti)
{
        const unsigned short table[] = { 2, 4, 4, 6 };
        int rc, exception = 0;

        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
                vcpu->stat.deliver_emergency_signal++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_SERVICE:
                VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                           inti->ext.ext_params);
                vcpu->stat.deliver_service_signal++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_VIRTIO:
                VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM,
                                   inti->ext.ext_params2);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_SIGP_STOP:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
                vcpu->stat.deliver_stop_signal++;
                __set_intercept_indicator(vcpu, inti);
                break;

        case KVM_S390_SIGP_SET_PREFIX:
                VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
                           inti->prefix.address);
                vcpu->stat.deliver_prefix_signal++;
                vcpu->arch.sie_block->prefix = inti->prefix.address;
                vcpu->arch.sie_block->ihcpu = 0xffff;
                break;

        case KVM_S390_RESTART:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
                vcpu->stat.deliver_restart_signal++;
                rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
                        restart_old_psw), &vcpu->arch.sie_block->gpsw,
                        sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        offsetof(struct _lowcore, restart_psw),
                        sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_PROGRAM_INT:
                VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                           inti->pgm.code,
                           table[vcpu->arch.sie_block->ipa >> 14]);
                vcpu->stat.deliver_program_int++;
                rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_PGM_ILC,
                        table[vcpu->arch.sie_block->ipa >> 14]);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_PGM_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        default:
                BUG();
        }

        if (exception) {
                printk("kvm: The guest lowcore is not mapped during interrupt "
                        "delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
}

static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
        int rc, exception = 0;

        if (psw_extint_disabled(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;

        if (exception) {
                printk("kvm: The guest lowcore is not mapped during interrupt "
                        "delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
        return 1;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        if (atomic_read(&li->active)) {
                spin_lock_bh(&li->lock);
                list_for_each_entry(inti, &li->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock_bh(&li->lock);
        }

        if ((!rc) && atomic_read(&fi->active)) {
                spin_lock_bh(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock_bh(&fi->lock);
        }

        if ((!rc) && (vcpu->arch.sie_block->ckc <
                get_clock() + vcpu->arch.sie_block->epoch)) {
                if ((!psw_extint_disabled(vcpu)) &&
                    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
                        rc = 1;
        }

        return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
}

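/*
 * Handle the wait-state intercept. The 0xf4240000ul constant is one second
 * in TOD clock units (1,000,000 microseconds, one microsecond being 4096
 * units), so dividing the distance to the clock comparator by
 * (0xf4240000ul / HZ) converts it into jiffies for the wakeup timer.
 */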
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 now, sltime;
        DECLARE_WAITQUEUE(wait, current);

        vcpu->stat.exit_wait_state++;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;

        __set_cpu_idle(vcpu);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 0;
        spin_unlock_bh(&vcpu->arch.local_int.lock);

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                __unset_cpu_idle(vcpu);
                return -ENOTSUPP; /* disabled wait */
        }

        if (psw_extint_disabled(vcpu) ||
            (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                goto no_timer;
        }

        now = get_clock() + vcpu->arch.sie_block->epoch;
        if (vcpu->arch.sie_block->ckc < now) {
                __unset_cpu_idle(vcpu);
                return 0;
        }

        sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;

        vcpu->arch.ckc_timer.expires = jiffies + sltime;

        add_timer(&vcpu->arch.ckc_timer);
        VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
no_timer:
        spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        add_wait_queue(&vcpu->arch.local_int.wq, &wait);
        while (list_empty(&vcpu->arch.local_int.list) &&
                list_empty(&vcpu->arch.local_int.float_int->list) &&
                (!vcpu->arch.local_int.timer_due) &&
                !signal_pending(current)) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_bh(&vcpu->arch.local_int.lock);
                spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
                spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
                spin_lock_bh(&vcpu->arch.local_int.lock);
        }
        __unset_cpu_idle(vcpu);
        __set_current_state(TASK_RUNNING);
        /* remove ourselves from the queue we added ourselves to above */
        remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
        spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
        del_timer(&vcpu->arch.ckc_timer);
        return 0;
}

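/*
 * Timer callback armed by kvm_s390_handle_wait: mark the timer as due and
 * kick the VCPU out of its interruptible sleep.
 */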
void kvm_s390_idle_wakeup(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        spin_lock_bh(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 1;
        if (waitqueue_active(&vcpu->arch.local_int.wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
}

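/*
 * Walk the local and floating interrupt lists, deliver everything that is
 * deliverable and set intercept indicators for everything that is not.
 * Entries are unlinked under the lock but delivered after it is dropped,
 * since __do_deliver_interrupt writes to the guest lowcore.
 */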
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info *n, *inti = NULL;
        int deliver;

        __reset_intercept_indicators(vcpu);
        if (atomic_read(&li->active)) {
                do {
                        deliver = 0;
                        spin_lock_bh(&li->lock);
                        list_for_each_entry_safe(inti, n, &li->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&li->list))
                                atomic_set(&li->active, 0);
                        spin_unlock_bh(&li->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }

        if ((vcpu->arch.sie_block->ckc <
                get_clock() + vcpu->arch.sie_block->epoch))
                __try_deliver_ckc_interrupt(vcpu);

        if (atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        spin_lock_bh(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock_bh(&fi->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }
}

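/*
 * Inject a program check from kernel context. The BUG_ON below documents
 * the assumption that the VCPU cannot be sleeping in kvm_s390_handle_wait
 * while it is executing the instruction that raised the program check.
 */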
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_PROGRAM_INT;
        inti->pgm.code = code;

        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
        spin_lock_bh(&li->lock);
        list_add(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        BUG_ON(waitqueue_active(&li->wq));
        spin_unlock_bh(&li->lock);
        return 0;
}

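/*
 * Inject a floating interrupt (service signal or virtio notification). The
 * interrupt is queued on the per-VM list and one target CPU is woken:
 * preferably an idle one, otherwise the next online CPU in round-robin
 * order.
 */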
int kvm_s390_inject_vm(struct kvm *kvm,
                       struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti;
        int sigcpu;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_INT_VIRTIO:
                VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
                         s390int->parm, s390int->parm64);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                inti->ext.ext_params2 = s390int->parm64;
                break;
        case KVM_S390_INT_SERVICE:
                VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                break;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_INT_EMERGENCY:
        default:
                kfree(inti);
                return -EINVAL;
        }

        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock_bh(&fi->lock);
        list_add_tail(&inti->list, &fi->list);
        atomic_set(&fi->active, 1);
        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
        if (sigcpu == KVM_MAX_VCPUS) {
                do {
                        sigcpu = fi->next_rr_cpu++;
                        if (sigcpu == KVM_MAX_VCPUS)
                                sigcpu = fi->next_rr_cpu = 0;
                } while (fi->local_int[sigcpu] == NULL);
        }
        li = fi->local_int[sigcpu];
        spin_lock_bh(&li->lock);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
        spin_unlock_bh(&fi->lock);
        mutex_unlock(&kvm->lock);
        return 0;
}

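/*
 * Inject a CPU-local interrupt from userspace. Program checks are queued at
 * the head of the list because they relate to the instruction the guest is
 * currently executing; everything else is delivered in arrival order.
 */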
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                         struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_PROGRAM_INT:
                if (s390int->parm & 0xffff0000) {
                        kfree(inti);
                        return -EINVAL;
                }
                inti->type = s390int->type;
                inti->pgm.code = s390int->parm;
                VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_STOP:
        case KVM_S390_RESTART:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
                inti->type = s390int->type;
                break;
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
        default:
                kfree(inti);
                return -EINVAL;
        }

        mutex_lock(&vcpu->kvm->lock);
        li = &vcpu->arch.local_int;
        spin_lock_bh(&li->lock);
        if (inti->type == KVM_S390_PROGRAM_INT)
                list_add(&inti->list, &li->list);
        else
                list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (inti->type == KVM_S390_SIGP_STOP)
                li->action_bits |= ACTION_STOP_ON_STOP;
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
        spin_unlock_bh(&li->lock);
        mutex_unlock(&vcpu->kvm->lock);
        return 0;
}