/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	s64 hostclk, val;
	int i, rc;
	u64 op2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
	val = (val - hostclk) & ~0x3fUL;

	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}
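/*
 * Note on the epoch arithmetic above: SIE presents the guest TOD as
 * host TOD + epoch, so storing (requested value - host clock) into
 * every vcpu's epoch makes the guest-visible clock jump to the
 * requested value while the host clock keeps running undisturbed.
 */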
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}
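/*
 * The prefix designates the 8k prefix area that is exchanged with
 * absolute addresses 0-8191, which is why the value read from the
 * guest is masked with 0x7fffe000 (8k aligned, below 2G) above.
 */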
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}
static void __skey_check_enable(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return;

	s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}
static int handle_skey(struct kvm_vcpu *vcpu)
{
	__skey_check_enable(vcpu);

	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(*psw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	psw->addr = __rewind_psw(*psw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_protection(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int cc, rc;
	u64 addr;

	rc = 0;
	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, &tpi_data, len);
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
			rc = -EFAULT;
	}
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (!rc)
		kfree(inti);
	else
		kvm_s390_reinject_io_int(vcpu->kvm, inti);
no_interrupt:
	/* Set condition code and we're done. */
	if (!rc)
		kvm_s390_set_psw_cc(vcpu, cc);
	return rc ? -EFAULT : 0;
}
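/*
 * Interruption-code layout used above: word 0 holds the subchannel id
 * and number, word 1 the I/O interruption parameter, word 2 the I/O
 * interruption word. With a nonzero operand address only the first two
 * words are stored; with a zero address all three go to the lowcore.
 */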
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			    vfacilities, 4);
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}
static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	return 1;
}
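/*
 * The EA/BA checks above reflect the z/Architecture addressing modes:
 * EA=0,BA=0 is 24-bit, EA=0,BA=1 is 31-bit, EA=1,BA=1 is 64-bit and
 * EA=1,BA=0 is invalid; the instruction address must fit the mode.
 */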
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}
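/*
 * LPSW loads an 8-byte ESA/390-format PSW; the code above widens it to
 * the 16-byte z/Architecture format by shifting the mask into the high
 * half and moving the addressing-mode bit out of the address word.
 */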
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->arch.stidp_data;
	u64 operand2;
	int rc;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel;
	 * anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}
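/*
 * Dispatch works the same for all handler tables in this file: the low
 * byte of the interception parameter (IPA, or IPB for the EB format)
 * indexes a 256-entry table, and a NULL entry falls through to
 * -EOPNOTSUPP so that the instruction is forwarded to userspace.
 */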
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}
#define PFMF_RESERVED	0xfffc0101UL
#define PFMF_SK		0x00020000UL
#define PFMF_CF		0x00010000UL
#define PFMF_UI		0x00008000UL
#define PFMF_FSC	0x00007000UL
#define PFMF_NQ		0x00000800UL
#define PFMF_MR		0x00000400UL
#define PFMF_MC		0x00000200UL
#define PFMF_KEY	0x000000feUL
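/*
 * Bit layout of the PFMF first-operand register implied by the masks
 * above: SK (set key), CF (clear frame), UI (usage indication), FSC
 * (frame-size code), NQ (non-quiescing), MR/MC (reference/change bit
 * control for conditional SSKE) and the storage KEY itself; all other
 * bits are reserved and must be zero.
 */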
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_protection(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr, abs_addr;

		/* Translate guest address to host address */
		if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
			abs_addr = kvm_s390_real_to_abs(vcpu, start);
		else
			abs_addr = start;
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			__skey_check_enable(vcpu);
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}
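/*
 * Frame-size codes handled above: FSC 0 operates on a single 4k frame
 * and FSC 1 on a 1M frame (EDAT1); FSC 2 (2G frames, EDAT2) is rejected
 * with a specification exception since it is not supported here.
 */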
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!kvm_s390_cmma_enabled(vcpu->kvm))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Rewind PSW to repeat the ESSA instruction */
	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			break;	/* invalid entry */
		/* try to free backing */
		__gmap_zap(cbrle, gmap);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}
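/*
 * The CBRL area parsed above is filled in while the guest runs: the SIE
 * block's cbrlo field points to a page of 8-byte entries, each holding
 * the absolute address of a page the guest released via ESSA, and the
 * low bits of cbrlo encode the next-entry offset (the "nceo" that the
 * reset comment refers to), i.e. how many entries are valid.
 */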
static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 val = 0;
	int reg, rc;
	u64 ga;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	do {
		rc = read_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		ga += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}
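/*
 * LCTL only replaces bits 32-63 of each selected control register,
 * hence the 32-bit reads and the masking above; the register range
 * r1..r3 wraps around from 15 to 0, which the "% 16" step models.
 */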
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 ga;
	u32 val;
	int reg, rc;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	do {
		val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful;
		rc = write_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		ga += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 ga, val;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	do {
		rc = read_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		vcpu->arch.sie_block->gcr[reg] = val;
		ga += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}
static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 ga, val;
	int reg, rc;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	do {
		val = vcpu->arch.sie_block->gcr[reg];
		rc = write_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		ga += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}
static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
};
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, &gpa, 1);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, &gpa, 0);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;	/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}
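/*
 * Condition codes set above follow TEST PROTECTION: cc0 means fetch and
 * store are permitted, cc1 fetch only, cc3 translation not available;
 * cc2 would indicate a storage-key mismatch, which is not emulated yet.
 */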
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}
static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}