/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
15 #include <linux/kvm.h>
16 #include <linux/kvm_host.h>
17 #include <linux/slab.h>
23 static int __sigp_sense(struct kvm_vcpu
*vcpu
, u16 cpu_addr
,
26 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
29 if (cpu_addr
>= KVM_MAX_VCPUS
)
30 return SIGP_CC_NOT_OPERATIONAL
;
33 if (fi
->local_int
[cpu_addr
] == NULL
)
34 rc
= SIGP_CC_NOT_OPERATIONAL
;
35 else if (!(atomic_read(fi
->local_int
[cpu_addr
]->cpuflags
)
36 & (CPUSTAT_ECALL_PEND
| CPUSTAT_STOPPED
)))
37 rc
= SIGP_CC_ORDER_CODE_ACCEPTED
;
39 *reg
&= 0xffffffff00000000UL
;
40 if (atomic_read(fi
->local_int
[cpu_addr
]->cpuflags
)
42 *reg
|= SIGP_STATUS_EXT_CALL_PENDING
;
43 if (atomic_read(fi
->local_int
[cpu_addr
]->cpuflags
)
45 *reg
|= SIGP_STATUS_STOPPED
;
46 rc
= SIGP_CC_STATUS_STORED
;
48 spin_unlock(&fi
->lock
);
50 VCPU_EVENT(vcpu
, 4, "sensed status of cpu %x rc %x", cpu_addr
, rc
);
54 static int __sigp_emergency(struct kvm_vcpu
*vcpu
, u16 cpu_addr
)
56 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
57 struct kvm_s390_local_interrupt
*li
;
58 struct kvm_s390_interrupt_info
*inti
;
61 if (cpu_addr
>= KVM_MAX_VCPUS
)
62 return SIGP_CC_NOT_OPERATIONAL
;
64 inti
= kzalloc(sizeof(*inti
), GFP_KERNEL
);
68 inti
->type
= KVM_S390_INT_EMERGENCY
;
69 inti
->emerg
.code
= vcpu
->vcpu_id
;
72 li
= fi
->local_int
[cpu_addr
];
74 rc
= SIGP_CC_NOT_OPERATIONAL
;
78 spin_lock_bh(&li
->lock
);
79 list_add_tail(&inti
->list
, &li
->list
);
80 atomic_set(&li
->active
, 1);
81 atomic_set_mask(CPUSTAT_EXT_INT
, li
->cpuflags
);
82 if (waitqueue_active(li
->wq
))
83 wake_up_interruptible(li
->wq
);
84 spin_unlock_bh(&li
->lock
);
85 rc
= SIGP_CC_ORDER_CODE_ACCEPTED
;
86 VCPU_EVENT(vcpu
, 4, "sent sigp emerg to cpu %x", cpu_addr
);
88 spin_unlock(&fi
->lock
);
92 static int __sigp_external_call(struct kvm_vcpu
*vcpu
, u16 cpu_addr
)
94 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
95 struct kvm_s390_local_interrupt
*li
;
96 struct kvm_s390_interrupt_info
*inti
;
99 if (cpu_addr
>= KVM_MAX_VCPUS
)
100 return SIGP_CC_NOT_OPERATIONAL
;
102 inti
= kzalloc(sizeof(*inti
), GFP_KERNEL
);
106 inti
->type
= KVM_S390_INT_EXTERNAL_CALL
;
107 inti
->extcall
.code
= vcpu
->vcpu_id
;
109 spin_lock(&fi
->lock
);
110 li
= fi
->local_int
[cpu_addr
];
112 rc
= SIGP_CC_NOT_OPERATIONAL
;
116 spin_lock_bh(&li
->lock
);
117 list_add_tail(&inti
->list
, &li
->list
);
118 atomic_set(&li
->active
, 1);
119 atomic_set_mask(CPUSTAT_EXT_INT
, li
->cpuflags
);
120 if (waitqueue_active(li
->wq
))
121 wake_up_interruptible(li
->wq
);
122 spin_unlock_bh(&li
->lock
);
123 rc
= SIGP_CC_ORDER_CODE_ACCEPTED
;
124 VCPU_EVENT(vcpu
, 4, "sent sigp ext call to cpu %x", cpu_addr
);
126 spin_unlock(&fi
->lock
);
130 static int __inject_sigp_stop(struct kvm_s390_local_interrupt
*li
, int action
)
132 struct kvm_s390_interrupt_info
*inti
;
134 inti
= kzalloc(sizeof(*inti
), GFP_ATOMIC
);
137 inti
->type
= KVM_S390_SIGP_STOP
;
139 spin_lock_bh(&li
->lock
);
140 if ((atomic_read(li
->cpuflags
) & CPUSTAT_STOPPED
)) {
144 list_add_tail(&inti
->list
, &li
->list
);
145 atomic_set(&li
->active
, 1);
146 atomic_set_mask(CPUSTAT_STOP_INT
, li
->cpuflags
);
147 li
->action_bits
|= action
;
148 if (waitqueue_active(li
->wq
))
149 wake_up_interruptible(li
->wq
);
151 spin_unlock_bh(&li
->lock
);
153 return SIGP_CC_ORDER_CODE_ACCEPTED
;
156 static int __sigp_stop(struct kvm_vcpu
*vcpu
, u16 cpu_addr
, int action
)
158 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
159 struct kvm_s390_local_interrupt
*li
;
162 if (cpu_addr
>= KVM_MAX_VCPUS
)
163 return SIGP_CC_NOT_OPERATIONAL
;
165 spin_lock(&fi
->lock
);
166 li
= fi
->local_int
[cpu_addr
];
168 rc
= SIGP_CC_NOT_OPERATIONAL
;
172 rc
= __inject_sigp_stop(li
, action
);
175 spin_unlock(&fi
->lock
);
176 VCPU_EVENT(vcpu
, 4, "sent sigp stop to cpu %x", cpu_addr
);
180 int kvm_s390_inject_sigp_stop(struct kvm_vcpu
*vcpu
, int action
)
182 struct kvm_s390_local_interrupt
*li
= &vcpu
->arch
.local_int
;
183 return __inject_sigp_stop(li
, action
);
186 static int __sigp_set_arch(struct kvm_vcpu
*vcpu
, u32 parameter
)
190 switch (parameter
& 0xff) {
192 rc
= SIGP_CC_NOT_OPERATIONAL
;
196 rc
= SIGP_CC_ORDER_CODE_ACCEPTED
;
204 static int __sigp_set_prefix(struct kvm_vcpu
*vcpu
, u16 cpu_addr
, u32 address
,
207 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
208 struct kvm_s390_local_interrupt
*li
= NULL
;
209 struct kvm_s390_interrupt_info
*inti
;
213 /* make sure that the new value is valid memory */
214 address
= address
& 0x7fffe000u
;
215 if (copy_from_guest_absolute(vcpu
, &tmp
, address
, 1) ||
216 copy_from_guest_absolute(vcpu
, &tmp
, address
+ PAGE_SIZE
, 1)) {
217 *reg
&= 0xffffffff00000000UL
;
218 *reg
|= SIGP_STATUS_INVALID_PARAMETER
;
219 return SIGP_CC_STATUS_STORED
;
222 inti
= kzalloc(sizeof(*inti
), GFP_KERNEL
);
226 spin_lock(&fi
->lock
);
227 if (cpu_addr
< KVM_MAX_VCPUS
)
228 li
= fi
->local_int
[cpu_addr
];
231 *reg
&= 0xffffffff00000000UL
;
232 *reg
|= SIGP_STATUS_INCORRECT_STATE
;
233 rc
= SIGP_CC_STATUS_STORED
;
238 spin_lock_bh(&li
->lock
);
239 /* cpu must be in stopped state */
240 if (!(atomic_read(li
->cpuflags
) & CPUSTAT_STOPPED
)) {
241 *reg
&= 0xffffffff00000000UL
;
242 *reg
|= SIGP_STATUS_INCORRECT_STATE
;
243 rc
= SIGP_CC_STATUS_STORED
;
248 inti
->type
= KVM_S390_SIGP_SET_PREFIX
;
249 inti
->prefix
.address
= address
;
251 list_add_tail(&inti
->list
, &li
->list
);
252 atomic_set(&li
->active
, 1);
253 if (waitqueue_active(li
->wq
))
254 wake_up_interruptible(li
->wq
);
255 rc
= SIGP_CC_ORDER_CODE_ACCEPTED
;
257 VCPU_EVENT(vcpu
, 4, "set prefix of cpu %02x to %x", cpu_addr
, address
);
259 spin_unlock_bh(&li
->lock
);
261 spin_unlock(&fi
->lock
);
265 static int __sigp_sense_running(struct kvm_vcpu
*vcpu
, u16 cpu_addr
,
269 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
271 if (cpu_addr
>= KVM_MAX_VCPUS
)
272 return SIGP_CC_NOT_OPERATIONAL
;
274 spin_lock(&fi
->lock
);
275 if (fi
->local_int
[cpu_addr
] == NULL
)
276 rc
= SIGP_CC_NOT_OPERATIONAL
;
278 if (atomic_read(fi
->local_int
[cpu_addr
]->cpuflags
)
281 rc
= SIGP_CC_ORDER_CODE_ACCEPTED
;
284 *reg
&= 0xffffffff00000000UL
;
285 *reg
|= SIGP_STATUS_NOT_RUNNING
;
286 rc
= SIGP_CC_STATUS_STORED
;
289 spin_unlock(&fi
->lock
);
291 VCPU_EVENT(vcpu
, 4, "sensed running status of cpu %x rc %x", cpu_addr
,
297 static int __sigp_restart(struct kvm_vcpu
*vcpu
, u16 cpu_addr
)
299 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
300 struct kvm_s390_local_interrupt
*li
;
301 int rc
= SIGP_CC_ORDER_CODE_ACCEPTED
;
303 if (cpu_addr
>= KVM_MAX_VCPUS
)
304 return SIGP_CC_NOT_OPERATIONAL
;
306 spin_lock(&fi
->lock
);
307 li
= fi
->local_int
[cpu_addr
];
309 rc
= SIGP_CC_NOT_OPERATIONAL
;
313 spin_lock_bh(&li
->lock
);
314 if (li
->action_bits
& ACTION_STOP_ON_STOP
)
317 VCPU_EVENT(vcpu
, 4, "sigp restart %x to handle userspace",
319 spin_unlock_bh(&li
->lock
);
321 spin_unlock(&fi
->lock
);
325 int kvm_s390_handle_sigp(struct kvm_vcpu
*vcpu
)
327 int r1
= (vcpu
->arch
.sie_block
->ipa
& 0x00f0) >> 4;
328 int r3
= vcpu
->arch
.sie_block
->ipa
& 0x000f;
330 u16 cpu_addr
= vcpu
->run
->s
.regs
.gprs
[r3
];
334 /* sigp in userspace can exit */
335 if (vcpu
->arch
.sie_block
->gpsw
.mask
& PSW_MASK_PSTATE
)
336 return kvm_s390_inject_program_int(vcpu
, PGM_PRIVILEGED_OP
);
338 order_code
= kvm_s390_get_base_disp_rs(vcpu
);
341 parameter
= vcpu
->run
->s
.regs
.gprs
[r1
];
343 parameter
= vcpu
->run
->s
.regs
.gprs
[r1
+ 1];
345 trace_kvm_s390_handle_sigp(vcpu
, order_code
, cpu_addr
, parameter
);
346 switch (order_code
) {
348 vcpu
->stat
.instruction_sigp_sense
++;
349 rc
= __sigp_sense(vcpu
, cpu_addr
,
350 &vcpu
->run
->s
.regs
.gprs
[r1
]);
352 case SIGP_EXTERNAL_CALL
:
353 vcpu
->stat
.instruction_sigp_external_call
++;
354 rc
= __sigp_external_call(vcpu
, cpu_addr
);
356 case SIGP_EMERGENCY_SIGNAL
:
357 vcpu
->stat
.instruction_sigp_emergency
++;
358 rc
= __sigp_emergency(vcpu
, cpu_addr
);
361 vcpu
->stat
.instruction_sigp_stop
++;
362 rc
= __sigp_stop(vcpu
, cpu_addr
, ACTION_STOP_ON_STOP
);
364 case SIGP_STOP_AND_STORE_STATUS
:
365 vcpu
->stat
.instruction_sigp_stop
++;
366 rc
= __sigp_stop(vcpu
, cpu_addr
, ACTION_STORE_ON_STOP
|
367 ACTION_STOP_ON_STOP
);
369 case SIGP_SET_ARCHITECTURE
:
370 vcpu
->stat
.instruction_sigp_arch
++;
371 rc
= __sigp_set_arch(vcpu
, parameter
);
373 case SIGP_SET_PREFIX
:
374 vcpu
->stat
.instruction_sigp_prefix
++;
375 rc
= __sigp_set_prefix(vcpu
, cpu_addr
, parameter
,
376 &vcpu
->run
->s
.regs
.gprs
[r1
]);
378 case SIGP_SENSE_RUNNING
:
379 vcpu
->stat
.instruction_sigp_sense_running
++;
380 rc
= __sigp_sense_running(vcpu
, cpu_addr
,
381 &vcpu
->run
->s
.regs
.gprs
[r1
]);
384 vcpu
->stat
.instruction_sigp_restart
++;
385 rc
= __sigp_restart(vcpu
, cpu_addr
);
386 if (rc
== SIGP_CC_BUSY
)
388 /* user space must know about restart */
396 vcpu
->arch
.sie_block
->gpsw
.mask
&= ~(3ul << 44);
397 vcpu
->arch
.sie_block
->gpsw
.mask
|= (rc
& 3ul) << 44;