/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

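/*
 * SIGP orders complete with one of the architected condition codes
 * defined in <asm/sigp.h>: SIGP_CC_ORDER_CODE_ACCEPTED (0),
 * SIGP_CC_STATUS_STORED (1), SIGP_CC_BUSY (2) and
 * SIGP_CC_NOT_OPERATIONAL (3).  Whenever CC1 is returned, the handlers
 * below preserve the high word of the status register and or the
 * SIGP_STATUS_* bits into its low word.
 */
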
static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int cpuflags;
	int rc;
	int ext_call_pending;

	li = &dst_vcpu->arch.local_int;

	cpuflags = atomic_read(li->cpuflags);
	ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
	if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (ext_call_pending)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (cpuflags & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
		   rc);
	return rc;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EMERGENCY,
		.u.emerg.code = vcpu->vcpu_id,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	return __inject_sigp_emergency(vcpu, dst_vcpu);
}

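/*
 * CONDITIONAL EMERGENCY SIGNAL: the parameter carries an ASN.  The
 * check below mirrors the architected conditions; if any of them
 * holds, the order is treated like a plain EMERGENCY SIGNAL,
 * otherwise "incorrect state" is stored.
 */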
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu,
					u16 asn, u64 *reg)
{
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Inject the emergency signal? */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __inject_sigp_emergency(vcpu, dst_vcpu);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

static int __sigp_external_call(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EXTERNAL_CALL,
		.u.extcall.code = vcpu->vcpu_id,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		return SIGP_CC_STATUS_STORED;
	} else if (rc == 0) {
		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
			   dst_vcpu->vcpu_id);
	}

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
		.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;
	unsigned int i;
	struct kvm_vcpu *v;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		kvm_for_each_vcpu(i, v, vcpu->kvm) {
			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
			kvm_clear_async_pf_completion_queue(v);
		}

		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			     u32 address, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_SET_PREFIX,
		.u.prefix.address = address & 0x7fffe000u,
	};
	int rc;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	} else if (rc == 0) {
		VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x",
			   dst_vcpu->vcpu_id, irq.u.prefix.address);
	}

	return rc;
}

static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
				       struct kvm_vcpu *dst_vcpu,
				       u32 addr, u64 *reg)
{
	int flags;
	int rc;

	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

static int __sigp_sense_running(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int rc;

	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
		   dst_vcpu->vcpu_id, rc);

	return rc;
}

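/*
 * The __prepare_* helpers below do not emulate their orders in the
 * kernel.  They return -EOPNOTSUPP, which makes kvm_s390_handle_sigp()
 * bail out to user space so that the order can be completed there.
 */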
static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	/* handle (RE)START in user space */
	int rc = -EOPNOTSUPP;

	/* make sure we don't race with STOP irq injection */
	spin_lock(&li->lock);
	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}

static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
				    struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	/* handle (INITIAL) CPU RESET in user space */
	return -EOPNOTSUPP;
}

static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
				  struct kvm_vcpu *dst_vcpu)
{
	/* handle unknown orders in user space */
	return -EOPNOTSUPP;
}

static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
			   u16 cpu_addr, u32 parameter, u64 *status_reg)
{
	int rc;
	struct kvm_vcpu *dst_vcpu;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, dst_vcpu);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, dst_vcpu);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
						 status_reg);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_cond_emergency++;
		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
						  status_reg);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
	}

	if (rc == -EOPNOTSUPP)
		VCPU_EVENT(vcpu, 4,
			   "sigp order %u -> cpu %x: handled in user space",
			   order_code, dst_vcpu->vcpu_id);

	return rc;
}

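/*
 * With the KVM_CAP_S390_USER_SIGP capability enabled
 * (kvm->arch.user_sigp), every order except SENSE, SENSE RUNNING,
 * EXTERNAL CALL and the emergency-signal orders is passed straight to
 * user space; only the statistics counters are updated in the kernel
 * first.
 */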
static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
{
	if (!vcpu->kvm->arch.user_sigp)
		return 0;

	switch (order_code) {
	case SIGP_SENSE:
	case SIGP_EXTERNAL_CALL:
	case SIGP_EMERGENCY_SIGNAL:
	case SIGP_COND_EMERGENCY_SIGNAL:
	case SIGP_SENSE_RUNNING:
		return 0;
	/* update counters as we're directly dropping to user space */
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		break;
	case SIGP_STORE_ADDITIONAL_STATUS:
		vcpu->stat.instruction_sigp_store_adtl_status++;
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
	}

	VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space",
		   order_code);

	return 1;
}

int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
	if (handle_sigp_order_in_user_space(vcpu, order_code))
		return -EOPNOTSUPP;

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	default:
		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
				     parameter,
				     &vcpu->run->s.regs.gprs[r1]);
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		BUG_ON(dest_vcpu == NULL);

		kvm_s390_vcpu_wakeup(dest_vcpu);
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}