/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
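
/*
 * Return-value convention used by the __sigp_* helpers below: they return
 * either one of the architected SIGP condition codes from <asm/sigp.h>
 * (SIGP_CC_ORDER_CODE_ACCEPTED, SIGP_CC_STATUS_STORED, SIGP_CC_BUSY,
 * SIGP_CC_NOT_OPERATIONAL) or a negative errno. kvm_s390_handle_sigp()
 * folds condition codes into the guest PSW and hands errnos back to its
 * caller.
 */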
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
			u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
		rc = SIGP_CC_NOT_OPERATIONAL;
	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
		   & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
		/* neither stopped nor with an external call pending */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		/* otherwise store the status bits for the caller */
		*reg &= 0xffffffff00000000UL;
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}
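
/*
 * Locking convention for the senders below: fi->lock protects the
 * float_int.local_int[] array, while each per-vcpu li->lock (taken with
 * the _bh variants) protects that vcpu's list of pending local
 * interrupts. fi->lock is always acquired before li->lock.
 */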
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	inti->emerg.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	/* kick the target vcpu out of its wait, if it is sleeping */
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}
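
/*
 * SIGP COND EMERGENCY SIGNAL degenerates to a plain emergency signal
 * whenever at least one delivery condition holds: the target is not
 * stopped, is not enabled for both I/O and external interrupts, is
 * waiting at a non-zero PSW address, or is running with the given ASN
 * as its primary or secondary ASN. Otherwise "incorrect state" status
 * is stored for the caller.
 */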
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
					u16 asn, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Deliver the emergency signal? */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __sigp_emergency(vcpu, cpu_addr);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}
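
/*
 * SIGP EXTERNAL CALL queues a KVM_S390_INT_EXTERNAL_CALL interrupt that
 * carries the calling CPU's address; apart from the interrupt type and
 * payload it follows the same pattern as __sigp_emergency() above.
 */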
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}
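
/*
 * Inject a stop interrupt into one vcpu's local interrupt state. The
 * -ESHUTDOWN return value is an internal marker meaning "target was
 * already stopped": __sigp_stop() uses it to store the status for a
 * stop-and-store order after all spinlocks have been dropped.
 */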
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		kfree(inti);
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
out:
	spin_unlock_bh(&li->lock);

	return rc;
}
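
/*
 * The action bits (ACTION_STOP_ON_STOP, ACTION_STORE_ON_STOP) are left
 * in li->action_bits for the target vcpu to act upon when it processes
 * the pending stop interrupt.
 */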
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		goto unlock;
	}

	rc = __inject_sigp_stop(li, action);

unlock:
	spin_unlock(&fi->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
		/* If the CPU has already been stopped, we still have
		 * to save the status when doing stop-and-store. This
		 * has to be done after unlocking all spinlocks. */
		struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		rc = kvm_s390_store_status_unloaded(dst_vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
	}

	return rc;
}
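
/*
 * SIGP SET ARCHITECTURE: parameter code 0 requests ESA/S390 mode, codes
 * 1 and 2 request z/Architecture mode. KVM guests always run in
 * z/Architecture mode, so the former is reported as not operational and
 * the latter are simply accepted.
 */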
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}
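
/*
 * The prefix register relocates the first 8K of absolute storage, hence
 * the 8K alignment mask (0x7fffe000) below and the check that both
 * pages of the new prefix area are backed by accessible guest memory.
 */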
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	    copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock(&fi->lock);
	if (cpu_addr < KVM_MAX_VCPUS)
		li = fi->local_int[cpu_addr];

	if (li == NULL) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_fi;
	}

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
out_fi:
	spin_unlock(&fi->lock);
	return rc;
}
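
/*
 * SIGP STORE STATUS AT ADDRESS: the target must be stopped, and the
 * save area is aligned to 512 bytes (mask 0x7ffffe00) before the status
 * is written out by kvm_s390_store_status_unloaded().
 */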
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
					u32 addr, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	int flags;
	int rc;

	if (cpu_id < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock_bh(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}
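
/*
 * SIGP SENSE RUNNING STATUS: accepted (CC 0) while the target vcpu is
 * loaded on a host CPU (CPUSTAT_RUNNING); otherwise the not-running
 * status bit is stored.
 */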
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
				u64 *reg)
{
	int rc;
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
		rc = SIGP_CC_NOT_OPERATIONAL;
	else {
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_RUNNING) {
			/* running */
			rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		} else {
			/* not running */
			*reg &= 0xffffffff00000000UL;
			*reg |= SIGP_STATUS_NOT_RUNNING;
			rc = SIGP_CC_STATUS_STORED;
		}
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);

	return rc;
}
/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		goto out;
	}

	spin_lock_bh(&li->lock);
	/* a pending stop makes the destination busy */
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = SIGP_CC_BUSY;
	spin_unlock_bh(&li->lock);
out:
	spin_unlock(&fi->lock);
	return rc;
}
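
/*
 * SIGP instruction decode: r1 and r3 come from the IPA halfword of the
 * SIE block, the target CPU address is taken from gpr r3, the order
 * code from base + displacement, and the 32-bit parameter from the odd
 * register of the r1 pair (r1 itself if r1 is odd, r1 + 1 if even).
 */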
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
						  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_START:
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
			rc = -EOPNOTSUPP;    /* Handle START in user space */
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
			VCPU_EVENT(vcpu, 4,
				   "sigp restart %x to handle userspace",
				   cpu_addr);
			/* user space must know about restart */
			rc = -EOPNOTSUPP;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}