/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
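
/*
 * SIGP SENSE: report the status of the addressed CPU. The order is
 * accepted if the target has neither an external call pending nor is
 * stopped; otherwise the status bits are stored into the caller's
 * register and the "status stored" condition code is returned.
 */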
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
                        u64 *reg)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock(&fi->lock);
        if (fi->local_int[cpu_addr] == NULL)
                rc = SIGP_CC_NOT_OPERATIONAL;
        else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
                   & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        else {
                *reg &= 0xffffffff00000000UL;
                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
                    & CPUSTAT_ECALL_PEND)
                        *reg |= SIGP_STATUS_EXT_CALL_PENDING;
                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
                    & CPUSTAT_STOPPED)
                        *reg |= SIGP_STATUS_STOPPED;
                rc = SIGP_CC_STATUS_STORED;
        }
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
        return rc;
}
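
/*
 * SIGP EMERGENCY SIGNAL: queue an emergency external interrupt,
 * tagged with the sender's CPU id, on the target CPU's local
 * interrupt list and wake the target if it is waiting.
 */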
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_INT_EMERGENCY;
        inti->emerg.code = vcpu->vcpu_id;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = SIGP_CC_NOT_OPERATIONAL;
                kfree(inti);
                goto unlock;
        }
        spin_lock_bh(&li->lock);
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
        spin_unlock_bh(&li->lock);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
        spin_unlock(&fi->lock);
        return rc;
}
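
/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: deliver the emergency signal
 * only when the target CPU's state requires it (it is not stopped,
 * not enabled for I/O and external interruptions, waiting with a
 * non-zero PSW address, or running with a matching primary or
 * secondary ASN); otherwise store "incorrect state".
 */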
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
                                        u16 asn, u64 *reg)
{
        struct kvm_vcpu *dst_vcpu = NULL;
        const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
        u16 p_asn, s_asn;
        psw_t *psw;
        u32 flags;

        if (cpu_addr < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
        flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
        psw = &dst_vcpu->arch.sie_block->gpsw;
        p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
        s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

        /* Deliver the emergency signal? */
        if (!(flags & CPUSTAT_STOPPED)
            || (psw->mask & psw_int_mask) != psw_int_mask
            || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
            || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
                return __sigp_emergency(vcpu, cpu_addr);
        } else {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                return SIGP_CC_STATUS_STORED;
        }
}
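
/*
 * SIGP EXTERNAL CALL: like the emergency signal, but queues an
 * external-call interrupt carrying the sender's CPU address.
 */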
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_INT_EXTERNAL_CALL;
        inti->extcall.code = vcpu->vcpu_id;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = SIGP_CC_NOT_OPERATIONAL;
                kfree(inti);
                goto unlock;
        }
        spin_lock_bh(&li->lock);
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
        spin_unlock_bh(&li->lock);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
        spin_unlock(&fi->lock);
        return rc;
}
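
/*
 * Queue a stop interrupt for the given local interrupt structure.
 * If the CPU is already stopped, -ESHUTDOWN tells the caller that a
 * requested store-on-stop still needs to be performed.
 */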
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
        struct kvm_s390_interrupt_info *inti;
        int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
        if (!inti)
                return -ENOMEM;
        inti->type = KVM_S390_SIGP_STOP;

        spin_lock_bh(&li->lock);
        if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                kfree(inti);
                if ((action & ACTION_STORE_ON_STOP) != 0)
                        rc = -ESHUTDOWN;
                goto out;
        }
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
        li->action_bits |= action;
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
out:
        spin_unlock_bh(&li->lock);

        return rc;
}
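
/*
 * SIGP STOP / SIGP STOP AND STORE STATUS: inject a stop interrupt
 * into the target CPU; for stop-and-store, the status of an already
 * stopped CPU is saved directly, outside of any spinlock.
 */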
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = SIGP_CC_NOT_OPERATIONAL;
                goto unlock;
        }

        rc = __inject_sigp_stop(li, action);

unlock:
        spin_unlock(&fi->lock);
        VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

        if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
                /* If the CPU has already been stopped, we still have
                 * to save the status when doing stop-and-store. This
                 * has to be done after unlocking all spinlocks. */
                struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
                rc = kvm_s390_store_status_unloaded(dst_vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
        }

        return rc;
}
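
/*
 * SIGP SET ARCHITECTURE: mode 0 is reported as not operational,
 * modes 1 and 2 are accepted, and any other mode is left to user
 * space via -EOPNOTSUPP.
 */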
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
        int rc;

        switch (parameter & 0xff) {
        case 0:
                rc = SIGP_CC_NOT_OPERATIONAL;
                break;
        case 1:
        case 2:
                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
                break;
        default:
                rc = -EOPNOTSUPP;
        }
        return rc;
}
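
/*
 * SIGP SET PREFIX: check that both pages of the new 8k prefix area
 * are backed by guest memory, then queue a set-prefix interrupt for
 * the target CPU, which must be stopped.
 */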
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
                             u64 *reg)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li = NULL;
        struct kvm_s390_interrupt_info *inti;
        int rc;
        u8 tmp;

        /* make sure that the new value is valid memory */
        address = address & 0x7fffe000u;
        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
            copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INVALID_PARAMETER;
                return SIGP_CC_STATUS_STORED;
        }

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return SIGP_CC_BUSY;

        spin_lock(&fi->lock);
        if (cpu_addr < KVM_MAX_VCPUS)
                li = fi->local_int[cpu_addr];

        if (li == NULL) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                rc = SIGP_CC_STATUS_STORED;
                kfree(inti);
                goto out_fi;
        }

        spin_lock_bh(&li->lock);
        /* cpu must be in stopped state */
        if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                rc = SIGP_CC_STATUS_STORED;
                kfree(inti);
                goto out_li;
        }

        inti->type = KVM_S390_SIGP_SET_PREFIX;
        inti->prefix.address = address;

        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
        spin_unlock_bh(&li->lock);
out_fi:
        spin_unlock(&fi->lock);
        return rc;
}
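
/*
 * SIGP STORE STATUS AT ADDRESS: store the status of the stopped
 * target CPU at the given 512-byte aligned absolute address.
 */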
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
                                       u32 addr, u64 *reg)
{
        struct kvm_vcpu *dst_vcpu = NULL;
        int flags;
        int rc;

        if (cpu_id < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock_bh(&dst_vcpu->arch.local_int.lock);
        flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
        spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
        if (!(flags & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                return SIGP_CC_STATUS_STORED;
        }

        addr &= 0x7ffffe00;
        rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
        if (rc == -EFAULT) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INVALID_PARAMETER;
                rc = SIGP_CC_STATUS_STORED;
        }
        return rc;
}
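
/*
 * SIGP SENSE RUNNING STATUS: report whether the target CPU is
 * currently running; if not, store SIGP_STATUS_NOT_RUNNING.
 */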
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
                                u64 *reg)
{
        int rc;
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock(&fi->lock);
        if (fi->local_int[cpu_addr] == NULL)
                rc = SIGP_CC_NOT_OPERATIONAL;
        else {
                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
                    & CPUSTAT_RUNNING) {
                        /* running */
                        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
                } else {
                        /* not running */
                        *reg &= 0xffffffff00000000UL;
                        *reg |= SIGP_STATUS_NOT_RUNNING;
                        rc = SIGP_CC_STATUS_STORED;
                }
        }
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
                   rc);

        return rc;
}

/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = SIGP_CC_NOT_OPERATIONAL;
                goto out;
        }

        spin_lock_bh(&li->lock);
        if (li->action_bits & ACTION_STOP_ON_STOP)
                rc = SIGP_CC_BUSY;
        spin_unlock_bh(&li->lock);
out:
        spin_unlock(&fi->lock);
        return rc;
}
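
/*
 * Top-level SIGP handler: decode the order code, CPU address, and
 * parameter from the intercepted instruction, dispatch to the handler
 * for the order, and set the resulting condition code. Orders that
 * must be completed in user space return -EOPNOTSUPP.
 */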
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
        int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int r3 = vcpu->arch.sie_block->ipa & 0x000f;
        u32 parameter;
        u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
        u8 order_code;
        int rc;

        /* sigp in userspace can exit */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        order_code = kvm_s390_get_base_disp_rs(vcpu);
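
        /* The parameter is read from the odd-numbered register of
         * the r1 register pair. */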
        if (r1 % 2)
                parameter = vcpu->run->s.regs.gprs[r1];
        else
                parameter = vcpu->run->s.regs.gprs[r1 + 1];

        trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
        switch (order_code) {
        case SIGP_SENSE:
                vcpu->stat.instruction_sigp_sense++;
                rc = __sigp_sense(vcpu, cpu_addr,
                                  &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_EXTERNAL_CALL:
                vcpu->stat.instruction_sigp_external_call++;
                rc = __sigp_external_call(vcpu, cpu_addr);
                break;
        case SIGP_EMERGENCY_SIGNAL:
                vcpu->stat.instruction_sigp_emergency++;
                rc = __sigp_emergency(vcpu, cpu_addr);
                break;
        case SIGP_STOP:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
                break;
        case SIGP_STOP_AND_STORE_STATUS:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
                                                 ACTION_STOP_ON_STOP);
                break;
        case SIGP_STORE_STATUS_AT_ADDRESS:
                rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
                                                 &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_SET_ARCHITECTURE:
                vcpu->stat.instruction_sigp_arch++;
                rc = __sigp_set_arch(vcpu, parameter);
                break;
        case SIGP_SET_PREFIX:
                vcpu->stat.instruction_sigp_prefix++;
                rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
                                       &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_COND_EMERGENCY_SIGNAL:
                rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
                                                  &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_SENSE_RUNNING:
                vcpu->stat.instruction_sigp_sense_running++;
                rc = __sigp_sense_running(vcpu, cpu_addr,
                                          &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_START:
                rc = sigp_check_callable(vcpu, cpu_addr);
                if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
                        rc = -EOPNOTSUPP;    /* Handle START in user space */
                break;
        case SIGP_RESTART:
                vcpu->stat.instruction_sigp_restart++;
                rc = sigp_check_callable(vcpu, cpu_addr);
                if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
                        VCPU_EVENT(vcpu, 4,
                                   "sigp restart %x to handle userspace",
                                   cpu_addr);
                        /* user space must know about restart */
                        rc = -EOPNOTSUPP;
                }
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (rc < 0)
                return rc;

        kvm_s390_set_psw_cc(vcpu, rc);
        return 0;
}