/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int handle_ri(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 64)) {
		vcpu->arch.sie_block->ecb3 |= 0x01;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	int rc;
	u8 ar;
	u64 op2, val;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
	kvm_s390_set_tod_clock(vcpu->kvm, val);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

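/*
 * Handle SPX (SET PREFIX) interception: read the new 8k-aligned prefix
 * from the second operand and, if it maps to valid guest memory, make it
 * the vCPU's prefix (i.e. relocate the low-core area).
 */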
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

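/*
 * Handle STPX (STORE PREFIX) interception: store the current 32-bit
 * prefix value to the word-aligned second operand.
 */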
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

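/*
 * Handle STAP (STORE CPU ADDRESS) interception: store the 16-bit CPU
 * address (here: the vcpu id) to the halfword-aligned operand.
 */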
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

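/*
 * Lazily enable storage-key handling: the first storage-key instruction
 * forces the host mm to start tracking real storage keys and then turns
 * off further ISKE/SSKE/RRBE intercepts so SIE can handle them itself.
 */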
static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	trace_kvm_s390_skey_related_inst(vcpu);
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return rc;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (!rc)
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	return rc;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_storage_key++;
	rc = __skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (sclp.has_skey) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	return 0;
}

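/*
 * Handle ISKE (INSERT STORAGE KEY EXTENDED): read the storage key of the
 * addressed page into the low byte of the first operand register.
 */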
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	unsigned char key;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, addr, &key);
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

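/*
 * Handle RRBE (RESET REFERENCE BIT EXTENDED): reset the reference bit of
 * the addressed page and report its previous state in the condition code.
 */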
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

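/*
 * Handle SSKE (SET STORAGE KEY EXTENDED). The m3 bits (defined below)
 * select the non-quiescing, conditional (MR/MC) and multiple-block
 * variants; bits for facilities the guest does not have are ignored.
 */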
#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

		if (kvm_is_error_hva(addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);
		up_read(&current->mm->mmap_sem);
		if (rc < 0)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

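/*
 * Handle TPI (TEST PENDING INTERRUPTION): dequeue a pending I/O
 * interrupt matching CR6 and store its interruption code either at the
 * given address (two words) or into the lowcore (three words).
 */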
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

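/*
 * Handle the interrupt portion of TSCH: dequeue a matching I/O interrupt
 * and exit to userspace, which emulates the rest of the instruction.
 */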
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

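/*
 * Handle STFL (STORE FACILITY LIST): store the guest's first 32 facility
 * bits into the lowcore.
 */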
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

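/*
 * Handle STIDP (STORE CPU ID): store the configured CPU id of the
 * machine model to the doubleword-aligned operand.
 */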
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x14] = kvm_s390_handle_vsie,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_iske,
	[0x2a] = handle_rrbe,
	[0x2b] = handle_sske,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones, that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

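/*
 * Handle EPSW (EXTRACT PSW): copy the two halves of the PSW mask into
 * the low words of the operand registers.
 */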
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

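/*
 * Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION): clear frames and/or
 * set their storage keys for 4k, 1M or (with EDAT2) 2G frames, as
 * selected by the control bits defined below.
 */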
#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long useraddr;

		/* Translate guest address to host address */
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = __skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, useraddr,
							key, NULL, nq, mr, mc);
			up_read(&current->mm->mmap_sem);
			if (rc < 0)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Retry the ESSA instruction */
	kvm_s390_retry_instr(vcpu);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

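/*
 * Handle TPROT (TEST PROTECTION) for the Linux memory-detection case
 * (access key 0): translate the address and derive the condition code
 * from whether the page is accessible and writable.
 */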
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x04] = handle_ptff,
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}