/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	if (get_guest(vcpu, address, (u32 __user *) operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}
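
/*
 * Background note: the prefix register relocates the guest's lowcore;
 * real addresses 0..8191 are exchanged with the 8k block at the prefix
 * address.  The 0x7fffe000 mask above therefore keeps an 8k-aligned
 * address below 2G, and both 4k halves of the future lowcore are
 * probed so that a prefix pointing into a memory hole is rejected
 * before it takes effect.
 */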

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* store the value */
	if (put_guest(vcpu, address, (u32 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	/* must be halfword boundary */
	if (useraddr & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* rewind the psw so the storage key instruction is retried */
	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
		    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
		    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	kvm_s390_set_psw_cc(vcpu, cc);
	return 0;
}
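
/*
 * For reference: TPI sets condition code 1 when an interruption code
 * was stored (at the operand address, or in the lowcore three-word
 * format when the operand address is zero) and condition code 0 when
 * no I/O interruption was pending.
 */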

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}
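
/*
 * Note: returning -EREMOTE is the convention used here to make the
 * interception path leave the vcpu loop, so that the prepared
 * KVM_EXIT_S390_TSCH exit reaches userspace, which then performs the
 * actual tsch handling.
 */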

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}
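
/*
 * css_support is set when userspace enables KVM_CAP_S390_CSS_SUPPORT,
 * i.e. when it wants to provide the channel subsystem itself.  Without
 * it, the guest sees condition code 3 ("not operational") for every
 * channel I/O instruction.
 */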

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   vfacilities, 4);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

static int is_valid_psw(psw_t *psw) {
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	return 1;
}
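
/*
 * Worked example: the EA/BA pair encodes the addressing mode.
 * EA=0/BA=0 is 24-bit, EA=0/BA=1 is 31-bit, EA=1/BA=1 is 64-bit and
 * EA=1/BA=0 is invalid.  A PSW with EA=0/BA=1 and an address of
 * 0x80000000 thus fails the ~PSW_ADDR_31 check above.
 */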

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be doubleword boundary */
	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out_exception:
	free_page(mem);
	return rc;
}
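
/*
 * For orientation: function code 0 merely reports the current
 * configuration level in gr0 (here always 3), fc 1 describes the
 * basic machine, fc 2 the LPAR, and fc 3 with sel1 == 2 and sel2 == 2
 * the VM-level data that handle_stsi_3_2_2() synthesizes; anything
 * above fc 3 just yields condition code 3.
 */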

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones, that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
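
/*
 * Dispatch note: the SIE block's ipa field contains the first two
 * bytes of the intercepted instruction, so "ipa & 0x00ff" is the
 * second opcode byte of a b2xx instruction and indexes directly into
 * the 256-entry handler table above.
 */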

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr;

		useraddr = gmap_translate(start, vcpu->arch.gmap);
		if (IS_ERR((void *)useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}
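
/*
 * Frame size example: with the 1M frame size code and a second
 * operand of 0x12345000, end = (0x12345000 + 0x100000) & ~0xfffffUL
 * = 0x12400000, so the loop always clears/keys up to the next 1M
 * boundary and reg2 is advanced to that boundary afterwards.
 */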

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
	[0x9c] = handle_io_inst,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rs(vcpu);

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

	reg = reg1;
	do {
		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rsy(vcpu);

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

	do {
		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
			       (u64 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}
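
/*
 * Register range example: as with the hardware instruction, the loop
 * wraps from control register 15 back to 0, so lctlg %c14,%c1,...
 * loads c14, c15, c0 and c1 from four consecutive doublewords.
 */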

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x8a] = handle_io_inst,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
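
/*
 * The (3ul << 44) manipulation above writes the condition code field
 * of the 64-bit PSW directly: cc 0 means fetch and store permitted,
 * cc 1 fetch only, cc 2 no access, mirroring what TPROT would have
 * returned had it run natively.
 */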

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}