/*
 * sigp.c - handling interprocessor communication
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include "gaccess.h"
#include "kvm-s390.h"
/* sigp order codes */
#define SIGP_SENSE             0x01
#define SIGP_EXTERNAL_CALL     0x02
#define SIGP_EMERGENCY         0x03
#define SIGP_START             0x04
#define SIGP_STOP              0x05
#define SIGP_RESTART           0x06
#define SIGP_STOP_STORE_STATUS 0x09
#define SIGP_INITIAL_CPU_RESET 0x0b
#define SIGP_CPU_RESET         0x0c
#define SIGP_SET_PREFIX        0x0d
#define SIGP_STORE_STATUS_ADDR 0x0e
#define SIGP_SET_ARCH          0x12
#define SIGP_SENSE_RUNNING     0x15
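
/* cpu status bits, or'ed into the issuer's status register whenever a
 * handler signals "status stored" (rc 1) */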
#define SIGP_STAT_EQUIPMENT_CHECK   0x80000000UL
#define SIGP_STAT_NOT_RUNNING       0x00000400UL
#define SIGP_STAT_INCORRECT_STATE   0x00000200UL
#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
#define SIGP_STAT_EXT_CALL_PENDING  0x00000080UL
#define SIGP_STAT_STOPPED           0x00000040UL
#define SIGP_STAT_OPERATOR_INTERV   0x00000020UL
#define SIGP_STAT_CHECK_STOP        0x00000010UL
#define SIGP_STAT_INOPERATIVE       0x00000004UL
#define SIGP_STAT_INVALID_ORDER     0x00000002UL
#define SIGP_STAT_RECEIVER_CHECK    0x00000001UL
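
/*
 * The helpers below return the SIGP condition code the issuing vcpu should
 * see: 0 = order accepted, 1 = status stored, 2 = busy, 3 = not operational.
 * Negative values are host errors and are passed back unchanged.
 * kvm_s390_handle_sigp() folds the condition code into bits 44-45 of the
 * guest PSW.
 */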
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
                        unsigned long *reg)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return 3; /* not operational */

        spin_lock(&fi->lock);
        if (fi->local_int[cpu_addr] == NULL)
                rc = 3; /* not operational */
        else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
                   & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                rc = 1; /* status stored */
        } else {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STAT_STOPPED;
                rc = 1; /* status stored */
        }
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
        return rc;
}
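
/*
 * An emergency signal is delivered by queueing a KVM_S390_INT_EMERGENCY
 * interrupt (carrying the sending vcpu's id) on the target's local
 * interrupt list, flagging an external interrupt in the target's cpuflags
 * and waking the target in case it is sleeping on its wait queue.
 */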
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return 3; /* not operational */

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_INT_EMERGENCY;
        inti->emerg.code = vcpu->vcpu_id;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = 3; /* not operational */
                kfree(inti);
                goto unlock;
        }
        spin_lock_bh(&li->lock);
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
        rc = 0; /* order accepted */
        VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
        spin_unlock(&fi->lock);
        return rc;
}
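
/*
 * SIGP EXTERNAL CALL uses the same delivery mechanism as the emergency
 * signal, but queues a KVM_S390_INT_EXTERNAL_CALL interrupt instead.
 */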
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return 3; /* not operational */

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_INT_EXTERNAL_CALL;
        inti->extcall.code = vcpu->vcpu_id;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = 3; /* not operational */
                kfree(inti);
                goto unlock;
        }
        spin_lock_bh(&li->lock);
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
        rc = 0; /* order accepted */
        VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
        spin_unlock(&fi->lock);
        return rc;
}
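
/*
 * A stop request is queued as a KVM_S390_SIGP_STOP interrupt on the target's
 * local interrupt list. The action bits (ACTION_STOP_ON_STOP,
 * ACTION_STORE_ON_STOP) are accumulated in li->action_bits for the target
 * vcpu's stop handling (stop only vs. stop and store status).
 */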
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
        if (!inti)
                return -ENOMEM;
        inti->type = KVM_S390_SIGP_STOP;

        spin_lock_bh(&li->lock);
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
        li->action_bits |= action;
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);

        return 0; /* order accepted */
}
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return 3; /* not operational */

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = 3; /* not operational */
                goto unlock;
        }

        rc = __inject_sigp_stop(li, action);

unlock:
        spin_unlock(&fi->lock);
        VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
        return rc;
}
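
/*
 * Non-static entry point: lets the rest of kvm-s390 request a stop from the
 * host side by injecting the stop interrupt directly into the vcpu's own
 * local interrupt state.
 */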
int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        return __inject_sigp_stop(li, action);
}
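
/*
 * SIGP SET ARCHITECTURE: only the architecture mode in the low byte of the
 * parameter register is inspected; no guest state is changed here.
 */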
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
        int rc;

        switch (parameter & 0xff) {
        case 0:
                rc = 3; /* not operational */
                break;
        case 1:
        case 2:
                rc = 0; /* order accepted */
                break;
        default:
                rc = -EOPNOTSUPP;
        }
        return rc;
}
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
                             unsigned long *reg)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li = NULL;
        struct kvm_s390_interrupt_info *inti;
        int rc;
        u8 tmp;

        /* make sure that the new value is valid memory */
        address = address & 0x7fffe000u;
        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
            copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
                *reg |= SIGP_STAT_INVALID_PARAMETER;
                return 1; /* invalid parameter */
        }

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return 2; /* busy */

        spin_lock(&fi->lock);
        if (cpu_addr < KVM_MAX_VCPUS)
                li = fi->local_int[cpu_addr];

        if (li == NULL) {
                rc = 1; /* incorrect state */
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STAT_INCORRECT_STATE;
                kfree(inti);
                goto out_fi;
        }

        spin_lock_bh(&li->lock);
        /* cpu must be in stopped state */
        if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                rc = 1; /* incorrect state */
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STAT_INCORRECT_STATE;
                kfree(inti);
                goto out_li;
        }

        inti->type = KVM_S390_SIGP_SET_PREFIX;
        inti->prefix.address = address;

        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        rc = 0; /* order accepted */

        VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
        spin_unlock_bh(&li->lock);
out_fi:
        spin_unlock(&fi->lock);
        return rc;
}
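
/*
 * SIGP SENSE RUNNING STATUS: report whether the addressed vcpu is currently
 * marked as running in its cpuflags, without disturbing it.
 */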
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
                                unsigned long *reg)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return 3; /* not operational */

        spin_lock(&fi->lock);
        if (fi->local_int[cpu_addr] == NULL)
                rc = 3; /* not operational */
        else {
                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
                    & CPUSTAT_RUNNING) {
                        /* running */
                        rc = 0; /* order accepted */
                } else {
                        /* not running */
                        *reg &= 0xffffffff00000000UL;
                        *reg |= SIGP_STAT_NOT_RUNNING;
                        rc = 1; /* status stored */
                }
        }
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
                   rc);

        return rc;
}
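
/*
 * SIGP is intercepted as an RS-format instruction: R1 and R3 come from the
 * ipa field of the SIE block, the base/displacement of the second operand
 * (which encodes the order code) from the ipb field. R3 holds the address
 * of the target cpu, R1 or R1 + 1 (the odd register of the pair) holds the
 * parameter. The resulting condition code is written back into bits 44-45
 * of the guest PSW.
 */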
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
        int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int r3 = vcpu->arch.sie_block->ipa & 0x000f;
        int base2 = vcpu->arch.sie_block->ipb >> 28;
        int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
        u32 parameter;
        u16 cpu_addr = vcpu->arch.guest_gprs[r3];
        u8 order_code;
        int rc;

        /* sigp in userspace can exit */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);

        order_code = disp2;
        if (base2)
                order_code += vcpu->arch.guest_gprs[base2];

        if (r1 % 2)
                parameter = vcpu->arch.guest_gprs[r1];
        else
                parameter = vcpu->arch.guest_gprs[r1 + 1];

        switch (order_code) {
        case SIGP_SENSE:
                vcpu->stat.instruction_sigp_sense++;
                rc = __sigp_sense(vcpu, cpu_addr,
                                  &vcpu->arch.guest_gprs[r1]);
                break;
        case SIGP_EXTERNAL_CALL:
                vcpu->stat.instruction_sigp_external_call++;
                rc = __sigp_external_call(vcpu, cpu_addr);
                break;
        case SIGP_EMERGENCY:
                vcpu->stat.instruction_sigp_emergency++;
                rc = __sigp_emergency(vcpu, cpu_addr);
                break;
        case SIGP_STOP:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
                break;
        case SIGP_STOP_STORE_STATUS:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP);
                break;
        case SIGP_SET_ARCH:
                vcpu->stat.instruction_sigp_arch++;
                rc = __sigp_set_arch(vcpu, parameter);
                break;
        case SIGP_SET_PREFIX:
                vcpu->stat.instruction_sigp_prefix++;
                rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
                                       &vcpu->arch.guest_gprs[r1]);
                break;
        case SIGP_SENSE_RUNNING:
                vcpu->stat.instruction_sigp_sense_running++;
                rc = __sigp_sense_running(vcpu, cpu_addr,
                                          &vcpu->arch.guest_gprs[r1]);
                break;
        case SIGP_RESTART:
                vcpu->stat.instruction_sigp_restart++;
                /* user space must know about restart */
        default:
                return -EOPNOTSUPP;
        }

        if (rc < 0)
                return rc;

        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;

        return 0;
}