/* arch/s390/kvm/priv.c (Linux 4.11-rc5) */

/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

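/*
 * Lazily enable runtime instrumentation: if the guest has the
 * runtime-instrumentation facility (64), set the RI interpretation
 * control in ecb3 and rewind the PSW so that SIE executes the
 * instruction itself; otherwise the instruction does not exist from
 * the guest's point of view.
 */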
static int handle_ri(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 64)) {
		vcpu->arch.sie_block->ecb3 |= 0x01;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	int rc;
	u8 ar;
	u64 op2, val;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
	kvm_s390_set_tod_clock(vcpu->kvm, val);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

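/* Handle SPX (SET PREFIX) interception */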
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

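/* Handle STPX (STORE PREFIX) interception */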
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

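/* Handle STAP (STORE CPU ADDRESS) interception */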
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

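/*
 * Storage keys stay disabled in the host mm until the guest first
 * issues a key instruction: s390_enable_skey() then converts the host
 * address space, and on success the ISKE/SSKE/RRBE intercept controls
 * are dropped from the SIE block.
 */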
static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	trace_kvm_s390_skey_related_inst(vcpu);
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return rc;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (!rc)
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	return rc;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_storage_key++;
	rc = __skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (sclp.has_skey) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	return 0;
}

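/*
 * ISKE: read the storage key of the absolute page designated by reg2
 * and return it in the low byte of reg1.
 */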
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	unsigned char key;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, addr, &key);
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

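/*
 * RRBE: reset the reference bit of the designated page; the previous
 * reference/change state is returned by reset_guest_reference_bit()
 * and reported to the guest via the condition code.
 */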
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
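/*
 * SSKE: m3 selects multiple-block mode (MB), conditional key setting
 * (MR/MC) and the non-quiescing variant (NQ); each bit is masked out
 * below unless the matching facility (8, 10 or 14) is enabled for the
 * guest.
 */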
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

		if (kvm_is_error_hva(addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);
		up_read(&current->mm->mmap_sem);
		if (rc < 0)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

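/*
 * An IPTE-interlock intercept means the guest's IPTE lock is currently
 * held; sleep until it is free, then let the guest retry the
 * instruction.
 */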
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

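/*
 * For STSI 3.2.2 we shift down any entries reported by a lower-level
 * stsi() (up to the limit of 8) and insert KVM as the topmost level-3
 * hypervisor in slot 0, with our own name, CPU counts and a capability
 * adjustment factor of 1000.
 */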
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x14] = kvm_s390_handle_vsie,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_iske,
	[0x2a] = handle_rrbe,
	[0x2b] = handle_sske,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL
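/*
 * PFMF (PERFORM FRAME MANAGEMENT FUNCTION): reg1 holds the function
 * bits defined above, reg2 the start address. Depending on FSC the
 * operation covers a 4K, 1M or 2G frame, which is processed below page
 * by page, optionally clearing each page (CF) and/or setting its
 * storage key (SK).
 */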
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long useraddr;

		/* Translate guest address to host address */
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = __skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, useraddr,
							key, NULL, nq, mr, mc);
			up_read(&current->mm->mmap_sem);
			if (rc < 0)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

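/*
 * ESSA is interpreted by SIE for CMMA-enabled guests; on interception
 * the CBRL origin in the SIE block points at a buffer of guest frame
 * addresses whose state changed. We discard (zap) the host mappings of
 * those pages, reset the buffer and let the guest retry the
 * instruction.
 */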
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Retry the ESSA instruction */
	kvm_s390_retry_instr(vcpu);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
	[0x60] = handle_ri,
	[0x61] = handle_ri,
	[0x62] = handle_ri,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

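/*
 * SCKPF (SET CLOCK PROGRAMMABLE FIELD): the low 16 bits of gr0 become
 * the guest's TOD programmable register; any of bits 32-47 being set
 * is a specification exception.
 */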
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x04] = handle_ptff,
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}