/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	if (get_guest(vcpu, address, (u32 __user *) operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	/* the new prefix is 8k aligned and must lie below 2 GB */
	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* store the value to guest memory */
	if (put_guest(vcpu, address, (u32 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	/* must be halfword aligned */
	if (useraddr & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}

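/*
 * Storage key instructions (ISKE, RRBE and SSKE, opcodes 0xb229-0xb22b in
 * the dispatch table below) are not emulated here: handle_skey() rewinds
 * the guest PSW by the 4-byte instruction length, so the guest simply
 * re-executes the instruction after the intercept has been processed.
 */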
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
		    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
		    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	kvm_s390_set_psw_cc(vcpu, cc);
	return 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* only pass the facility bits, which we can handle */
	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   vfacilities, 4);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

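/*
 * The addressing mode is encoded in the EA/BA bits of the PSW mask:
 * EA=0/BA=0 selects 24-bit mode, EA=0/BA=1 selects 31-bit mode and
 * EA=1/BA=1 selects 64-bit mode, while EA=1/BA=0 is invalid.
 * is_valid_psw() rejects PSWs with unassigned mask bits set or with an
 * instruction address that does not fit the selected addressing mode.
 */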
static int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be doubleword aligned */
	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

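/*
 * STSI: the function code lives in bits 32-35 of general register 0,
 * selector 1 in the low byte of register 0 and selector 2 in the low
 * halfword of register 1. Function code 0 only reports the current
 * configuration level (3 = virtual machine) in register 0; function
 * codes 1-3 store one page of system information at the second operand,
 * which therefore has to be page aligned.
 */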
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out_exception:
	free_page(mem);
	return rc;
}

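/*
 * Dispatch table for the 0xb2xx instructions. The handler is selected by
 * the second opcode byte, i.e. the low byte of the sie_block->ipa field,
 * which holds the first two bytes of the intercepted instruction. Opcodes
 * without an entry fall through to the -EOPNOTSUPP path in
 * kvm_s390_handle_b2().
 */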
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL

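/*
 * Fields of the PFMF first-operand register as used below: SK requests
 * setting of the storage key, CF requests clearing of the frame, UI is the
 * usage indication, FSC is the frame size code (0 = 4K, 1 = 1M), NQ selects
 * the non-quiescing form of key setting, MR/MC control updating of the
 * reference and change bits, and KEY holds the storage key value itself.
 * Any reserved bit being set is a specification exception.
 */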
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr;

		useraddr = gmap_translate(start, vcpu->arch.gmap);
		if (IS_ERR((void *)useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
	[0x9c] = handle_io_inst,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rs(vcpu);

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

	reg = reg1;
	do {
		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

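/*
 * LCTLG differs from LCTL above in that it loads the full 64-bit control
 * registers from a doubleword-aligned second operand, whereas LCTL only
 * replaces the low 32 bits of each control register from a word-aligned
 * operand.
 */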
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rsy(vcpu);

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

	do {
		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
			       (u64 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x8a] = handle_io_inst,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

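/*
 * SET CLOCK PROGRAMMABLE FIELD: bits 48-63 of general register 0 are
 * loaded into the vcpu's TOD programmable register. Bits 32-47 of
 * register 0 must be zero, otherwise a specification exception is raised.
 */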
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}