/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
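
/*
 * Most handlers below decode an S-format second operand from the SIE
 * block's ipb field: the base register number is the top nibble and the
 * 12-bit displacement occupies the next three nibbles. A minimal sketch
 * of that repeated pattern, assuming only what the handlers themselves
 * do (illustrative helper, not called anywhere in this file):
 */
static inline u64 priv_get_operand2(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;

	/* displacement, plus base register contents unless the base is 0 */
	return disp2 + (base2 ? vcpu->run->s.regs.gprs[base2] : 0);
}
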
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	operand2 = disp2;
	if (base2)
		operand2 += vcpu->run->s.regs.gprs[base2];

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	/* get the value */
	if (get_guest_u32(vcpu, operand2, &address)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	address = address & 0x7fffe000u;

	/*
	 * Make sure that the new value is valid memory. The mask above
	 * aligns the prefix to 8k, and the prefix area spans two pages,
	 * so probe both of them.
	 */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
out:
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	operand2 = disp2;
	if (base2)
		operand2 += vcpu->run->s.regs.gprs[base2];

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* store the value */
	if (put_guest_u32(vcpu, operand2, address)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
out:
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 useraddr;
	int rc;

	vcpu->stat.instruction_stap++;

	useraddr = disp2;
	if (base2)
		useraddr += vcpu->run->s.regs.gprs[base2];

	/* must be halfword boundary */
	if (useraddr & 1) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
	if (rc == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
out:
	return 0;
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;

	/* rewind the PSW so the guest re-executes the storage key instruction */
	vcpu->arch.sie_block->gpsw.addr -= 4;
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

static int handle_stsch(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_stsch++;
	VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3");
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
	return 0;
}

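/*
 * handle_stsch above and several handlers below set the guest condition
 * code by rewriting the two-bit field at shift 44 of the guest PSW mask.
 * A minimal sketch of that pattern (illustrative helper, not called
 * anywhere in this file):
 */
static inline void priv_set_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);	/* clear old CC */
	vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;	/* set new CC */
}
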
static int handle_chsc(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_chsc++;
	VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3");
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
	return 0;
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	unsigned int facility_list;
	int rc;

	vcpu->stat.instruction_stfl++;

	/* only pass the facility bits, which we can handle */
	facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc == -EFAULT)
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	else {
		VCPU_EVENT(vcpu, 5, "store facility list value %x",
			   facility_list);
		trace_kvm_s390_handle_stfl(vcpu, facility_list);
	}
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	int rc;

	vcpu->stat.instruction_stidp++;

	operand2 = disp2;
	if (base2)
		operand2 += vcpu->run->s.regs.gprs[base2];

	/* must be doubleword boundary */
	if (operand2 & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
	if (rc == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
out:
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;

	/* shift any existing VM entries down and insert our own block first */
	for (n = mem->count - 1; n > 0; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	unsigned long mem;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	operand2 = disp2;
	if (base2)
		operand2 += vcpu->run->s.regs.gprs[base2];

	/* the operand must be page aligned for fc > 0 */
	if (operand2 & 0xfff && fc > 0)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 0:
		/* function code 0: report the current level (3) in gpr 0 */
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		return 0;
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_mem;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_fail;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	default:
		goto out_fail;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_mem;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_mem:
	free_page(mem);
out_fail:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
	return 0;
}

static intercept_handler_t priv_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x34] = handle_stsch,
	[0x5f] = handle_chsc,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* a lot of B2 instructions are privileged. We first check for
	 * the privileged ones that we can handle in the kernel. If the
	 * kernel can handle this instruction, we check for the problem
	 * state bit and (a) handle the instruction or (b) send a code 2
	 * program check.
	 * Anything else goes to userspace. */
	handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
			return kvm_s390_inject_program_int(vcpu,
						PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
	int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
	int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
	int disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
	u64 address1 = disp1 + (base1 ? vcpu->run->s.regs.gprs[base1] : 0);
	u64 address2 = disp2 + (base2 ? vcpu->run->s.regs.gprs[base2] : 0);
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	/* we must resolve the address without holding the mmap semaphore.
	 * This is ok since the userspace hypervisor is not supposed to change
	 * the mapping while the guest queries the memory. Otherwise the guest
	 * might crash or get wrong info anyway. */
	user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, user_address);
	if (!vma) {
		up_read(&current->mm->mmap_sem);
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;
}

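/*
 * The resulting condition code above, assuming the usual TPROT semantics:
 * CC0 (bits cleared) - fetch and store permitted (writable mapping),
 * CC1 - fetch permitted but store not (read-only mapping),
 * CC2 - neither fetch nor store permitted (mapping without read access).
 */
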
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	/* bits 32-47 of gpr 0 must be zero */
	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	/* set the TOD programmable register from bits 48-63 of gpr 0 */
	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}