/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
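
/*
 * The prefix register relocates the first two 4K pages (the 8K prefix
 * area) of absolute storage, so that each CPU gets a private lowcore.
 * A new prefix therefore has to be 8K aligned (hence the 0x7fffe000
 * mask below), and both pages of the new prefix area must be
 * accessible before the switch is committed.
 */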

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	if (get_guest(vcpu, address, (u32 __user *) operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	if (put_guest(vcpu, address, (u32 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	/* must be halfword aligned */
	if (useraddr & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;
	/* rewind the PSW so the guest re-executes the storage key instruction */
	vcpu->arch.sie_block->gpsw.addr -= 4;
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2));
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4));
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
	return 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}
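
/*
 * kvm->arch.css_support is set when userspace implements the channel
 * subsystem itself; in that case only tpi and the interrupt portion of
 * tsch are handled in the kernel and everything else is forwarded.
 * Without css support, the guest simply gets condition code 3 on all
 * channel I/O instructions.
 */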

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	unsigned int facility_list;
	int rc;

	vcpu->stat.instruction_stfl++;
	/* only pass the facility bits, which we can handle */
	facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list);
	trace_kvm_s390_handle_stfl(vcpu, facility_list);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL
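
/*
 * Addressing mode encoding in the PSW (EA, BA bits): 00 selects 24-bit
 * mode, 01 selects 31-bit mode and 11 selects 64-bit mode, while 10
 * (EA alone) is invalid. A PSW is rejected when unassigned mask bits
 * are set or the instruction address does not fit the selected mode.
 */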

static int is_valid_psw(psw_t *psw) {
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	return 1;
}
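
/*
 * LPSW loads an 8-byte ESA/390-format PSW; below, its mask half is
 * widened into the 16-byte z/Architecture layout, with the addressing
 * mode bit carried over from the address word into the mask.
 */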

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be doubleword aligned */
	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}
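
/*
 * STSI example (illustrative): with gpr0 = 0x30000002 and gpr1 = 2 the
 * guest asks for function code 3, selector 1 = 2, selector 2 = 2, i.e.
 * the hypervisor information block filled in by handle_stsi_3_2_2()
 * above. Function code 0 just reports the highest supported function
 * code back in gpr0.
 */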

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff && fc > 0)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 0:
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		return 0;
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	default:
		goto out_no_data;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
out_exception:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};
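
/*
 * Dispatch example (illustrative): an intercepted SET PREFIX has
 * ipa == 0xb210, so b2_handlers[0xb210 & 0x00ff] picks slot 0x10,
 * i.e. handle_set_prefix(). Empty slots leave the handler NULL and
 * the instruction is passed to userspace.
 */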

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. We first check for
	 * the privileged ones, that we can handle in the kernel. If the
	 * kernel can handle this instruction, we check for the problem
	 * state bit and (a) handle the instruction or (b) send a code 2
	 * program check.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	/* r1 and r2 come from the RRE register field in the ipb */
	reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
	}
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
	[0x9c] = handle_io_inst,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if ((handler != handle_epsw) &&
		    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x8a] = handle_io_inst,
};

int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* All eb instructions that end up here are privileged. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
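
/*
 * Linux guests detect their memory size at boot by probing with TPROT
 * until they see an addressing exception. Only that pattern (access
 * key zero, DAT off) is handled below, by mapping the host vma
 * protection to a condition code; every other use of tprot goes to
 * userspace.
 */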

static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;

	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	/* set the TOD programmable field from the low 16 bits of gpr0 */
	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}