arch/s390/kvm/interrupt.c

/*
 * interrupt.c - handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <asm/lowcore.h>
#include <asm/uaccess.h>
#include <linux/kvm_host.h>
#include "kvm-s390.h"
#include "gaccess.h"

static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
                return 0;
        return 1;
}
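
/*
 * Check whether an interrupt of the given type can be delivered to the
 * guest right now.  The external types (emergency signal, service signal,
 * virtio) additionally need the matching subclass mask bit in guest
 * control register 0: 0x4000ul is the emergency-signal subclass mask,
 * 0x200ul the service-signal subclass mask, which also gates virtio
 * interrupts.  Program checks, sigp stop, sigp set prefix and restart
 * are always deliverable.
 */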
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
                                      struct interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_SERVICE:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_RESTART:
                return 1;
        default:
                BUG();
        }
        return 0;
}
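
/*
 * Helpers for enabled-wait bookkeeping and for the intercept indicators:
 * CPUSTAT_WAIT marks the vcpu as idle in its SIE control block and the
 * bit in the floating interrupt idle_mask lets kvm_s390_inject_vm() find
 * an idle target cpu.  The intercept indicators (cpuflags and lctl) make
 * the vcpu leave SIE, or intercept when the guest reloads control
 * register 0, once a queued interrupt becomes deliverable.
 */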
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_ECALL_PEND |
                CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
                                      struct interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR0;
                break;
        case KVM_S390_SIGP_STOP:
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
                break;
        default:
                BUG();
        }
}
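
/*
 * Deliver one interrupt to the guest: the interruption code and
 * parameters are written into the guest lowcore and the guest PSW is
 * exchanged with the corresponding new PSW, as the hardware would do.
 * table[] maps the two leftmost bits of the intercepted instruction
 * (sie_block->ipa >> 14) to the instruction length reported as ilc with
 * program checks.  A fault on any of the guest accesses is turned into
 * an addressing exception for the guest.
 */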
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                                   struct interrupt_info *inti)
{
        const unsigned short table[] = { 2, 4, 4, 6 };
        int rc, exception = 0;

        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
                vcpu->stat.deliver_emergency_signal++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_SERVICE:
                VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                           inti->ext.ext_params);
                vcpu->stat.deliver_service_signal++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_VIRTIO:
                VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM,
                                   inti->ext.ext_params2);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_SIGP_STOP:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
                vcpu->stat.deliver_stop_signal++;
                __set_intercept_indicator(vcpu, inti);
                break;

        case KVM_S390_SIGP_SET_PREFIX:
                VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
                           inti->prefix.address);
                vcpu->stat.deliver_prefix_signal++;
                vcpu->arch.sie_block->prefix = inti->prefix.address;
                vcpu->arch.sie_block->ihcpu = 0xffff;
                break;

        case KVM_S390_RESTART:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
                vcpu->stat.deliver_restart_signal++;
                rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
                        restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_PROGRAM_INT:
                VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                           inti->pgm.code,
                           table[vcpu->arch.sie_block->ipa >> 14]);
                vcpu->stat.deliver_program_int++;
                rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_PGM_ILC,
                        table[vcpu->arch.sie_block->ipa >> 14]);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_PGM_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        default:
                BUG();
        }

        if (exception) {
                VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
                           " interrupt");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                if (inti->type == KVM_S390_PROGRAM_INT) {
                        printk(KERN_WARNING "kvm: recursive program check\n");
                        BUG();
                }
        }
}
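
/*
 * Deliver a clock comparator external interrupt (code 0x1004) if the
 * guest has external interrupts enabled in its PSW and the clock
 * comparator subclass mask (0x800ul in control register 0) is set.
 * Returns 1 if the interrupt was delivered, 0 otherwise.
 */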
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
        int rc, exception = 0;

        if (psw_extint_disabled(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;

        if (exception) {
                VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
                           " ckc interrupt");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                return 0;
        }

        return 1;
}
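
/*
 * Returns nonzero if the vcpu has a deliverable interrupt pending: the
 * local interrupt list is scanned first, then the floating list, and
 * finally an already expired clock comparator counts as pending if it
 * is enabled by the PSW and by control register 0.
 */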
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
        struct local_interrupt *li = &vcpu->arch.local_int;
        struct float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct interrupt_info *inti;
        int rc = 0;

        if (atomic_read(&li->active)) {
                spin_lock_bh(&li->lock);
                list_for_each_entry(inti, &li->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock_bh(&li->lock);
        }

        if ((!rc) && atomic_read(&fi->active)) {
                spin_lock_bh(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock_bh(&fi->lock);
        }

        if ((!rc) && (vcpu->arch.sie_block->ckc <
                get_clock() + vcpu->arch.sie_block->epoch)) {
                if ((!psw_extint_disabled(vcpu)) &&
                        (vcpu->arch.sie_block->gcr[0] & 0x800ul))
                        rc = 1;
        }

        return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
}
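
/*
 * Handle the guest's enabled wait state.  A disabled wait (a PSW with
 * PER, I/O and external interruptions all masked off) is not virtualized
 * and is reported back with -ENOTSUPP.  If the clock comparator is
 * enabled and not yet expired, a timer wakeup is scheduled: 0xf4240000ul
 * is the number of TOD clock units per second (bit 51 of the TOD clock
 * is incremented every microsecond), so dividing by (0xf4240000ul / HZ)
 * converts the remaining time into jiffies.  The vcpu then sleeps on its
 * local interrupt wait queue until an interrupt is queued, the timer
 * fires or a signal is pending.
 */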
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 now, sltime;
        DECLARE_WAITQUEUE(wait, current);

        vcpu->stat.exit_wait_state++;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;

        __set_cpu_idle(vcpu);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 0;
        spin_unlock_bh(&vcpu->arch.local_int.lock);

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                __unset_cpu_idle(vcpu);
                return -ENOTSUPP; /* disabled wait */
        }

        if (psw_extint_disabled(vcpu) ||
            (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                goto no_timer;
        }

        now = get_clock() + vcpu->arch.sie_block->epoch;
        if (vcpu->arch.sie_block->ckc < now) {
                __unset_cpu_idle(vcpu);
                return 0;
        }

        sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;

        vcpu->arch.ckc_timer.expires = jiffies + sltime;

        add_timer(&vcpu->arch.ckc_timer);
        VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
no_timer:
        spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        add_wait_queue(&vcpu->arch.local_int.wq, &wait);
        while (list_empty(&vcpu->arch.local_int.list) &&
                list_empty(&vcpu->arch.local_int.float_int->list) &&
                (!vcpu->arch.local_int.timer_due) &&
                !signal_pending(current)) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_bh(&vcpu->arch.local_int.lock);
                spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
                spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
                spin_lock_bh(&vcpu->arch.local_int.lock);
        }
        __unset_cpu_idle(vcpu);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
        spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
        del_timer(&vcpu->arch.ckc_timer);
        return 0;
}
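
/*
 * Timer callback for vcpu->arch.ckc_timer: mark the timer as due and
 * wake up the vcpu sleeping in kvm_s390_handle_wait().
 */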
void kvm_s390_idle_wakeup(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        spin_lock_bh(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 1;
        if (waitqueue_active(&vcpu->arch.local_int.wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
}
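
/*
 * Deliver everything that is currently deliverable: local interrupts
 * first, then an expired clock comparator, then floating interrupts.
 * Interrupts that cannot be delivered yet get their intercept indicator
 * set via __set_intercept_indicator() so the vcpu leaves SIE as soon as
 * they become deliverable.
 */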
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct local_interrupt *li = &vcpu->arch.local_int;
        struct float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct interrupt_info *n, *inti = NULL;
        int deliver;

        __reset_intercept_indicators(vcpu);
        if (atomic_read(&li->active)) {
                do {
                        deliver = 0;
                        spin_lock_bh(&li->lock);
                        list_for_each_entry_safe(inti, n, &li->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&li->list))
                                atomic_set(&li->active, 0);
                        spin_unlock_bh(&li->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }

        if ((vcpu->arch.sie_block->ckc <
                get_clock() + vcpu->arch.sie_block->epoch))
                __try_deliver_ckc_interrupt(vcpu);

        if (atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        spin_lock_bh(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock_bh(&fi->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }
}
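
/*
 * Inject a program check into the vcpu from kernel context, e.g. when
 * delivering another interrupt faults on guest memory.  The interrupt
 * is queued at the head of the local list; the vcpu is expected to be
 * running, hence the BUG_ON on an active wait queue.
 */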
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct local_interrupt *li = &vcpu->arch.local_int;
        struct interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_PROGRAM_INT;
        inti->pgm.code = code;

        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
        spin_lock_bh(&li->lock);
        list_add(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        BUG_ON(waitqueue_active(&li->wq));
        spin_unlock_bh(&li->lock);
        return 0;
}
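
/*
 * Inject a floating interrupt (service signal or virtio) into the VM.
 * The interrupt is queued on the floating list and a target vcpu is
 * chosen: preferably one that is idle according to idle_mask, otherwise
 * the next vcpu in round-robin order.  The target is kicked out of SIE
 * via CPUSTAT_EXT_INT and woken up if it is sleeping.
 */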
int kvm_s390_inject_vm(struct kvm *kvm,
                       struct kvm_s390_interrupt *s390int)
{
        struct local_interrupt *li;
        struct float_interrupt *fi;
        struct interrupt_info *inti;
        int sigcpu;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_INT_VIRTIO:
                VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
                         s390int->parm, s390int->parm64);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                inti->ext.ext_params2 = s390int->parm64;
                break;
        case KVM_S390_INT_SERVICE:
                VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                break;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_INT_EMERGENCY:
        default:
                kfree(inti);
                return -EINVAL;
        }

        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock_bh(&fi->lock);
        list_add_tail(&inti->list, &fi->list);
        atomic_set(&fi->active, 1);
        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
        if (sigcpu == KVM_MAX_VCPUS) {
                do {
                        sigcpu = fi->next_rr_cpu++;
                        if (sigcpu == KVM_MAX_VCPUS)
                                sigcpu = fi->next_rr_cpu = 0;
                } while (fi->local_int[sigcpu] == NULL);
        }
        li = fi->local_int[sigcpu];
        spin_lock_bh(&li->lock);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
        spin_unlock_bh(&fi->lock);
        mutex_unlock(&kvm->lock);
        return 0;
}
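
/*
 * Inject an interrupt into a specific vcpu.  A program check must fit
 * into 16 bits (a parm with upper bits set is rejected) and is queued
 * at the head of the local list so it is delivered first; the other
 * allowed types are appended at the tail.  A sigp stop additionally
 * records ACTION_STOP_ON_STOP in action_bits.
 */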
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                         struct kvm_s390_interrupt *s390int)
{
        struct local_interrupt *li;
        struct interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_PROGRAM_INT:
                if (s390int->parm & 0xffff0000) {
                        kfree(inti);
                        return -EINVAL;
                }
                inti->type = s390int->type;
                inti->pgm.code = s390int->parm;
                VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_STOP:
        case KVM_S390_RESTART:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
                inti->type = s390int->type;
                break;
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
        default:
                kfree(inti);
                return -EINVAL;
        }

        mutex_lock(&vcpu->kvm->lock);
        li = &vcpu->arch.local_int;
        spin_lock_bh(&li->lock);
        if (inti->type == KVM_S390_PROGRAM_INT)
                list_add(&inti->list, &li->list);
        else
                list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (inti->type == KVM_S390_SIGP_STOP)
                li->action_bits |= ACTION_STOP_ON_STOP;
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
        spin_unlock_bh(&li->lock);
        mutex_unlock(&vcpu->kvm->lock);
        return 0;
}