// SPDX-License-Identifier: GPL-2.0
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
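/*
 * Each handler below emulates one intercepted privileged instruction.
 * The common pattern: bump the per-instruction exit statistic, inject
 * PGM_PRIVILEGED_OP if the guest PSW indicates problem state, decode
 * the operands, then either emulate in the kernel or hand off to
 * userspace.
 */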
static int handle_ri(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ri++;

	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
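/*
 * Note the lazy-enablement pattern used by handle_ri() and handle_gs():
 * the facility is only switched on in the SIE control block when the
 * guest first uses it, and the intercepted instruction is then retried
 * so that the hardware interprets it directly from then on.
 */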
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}
static int handle_gs(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_gs++;

	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}
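/*
 * The two E3 sub-opcodes matched above (0x49 and 0x4d) should be the
 * guarded-storage loads LGG and LLGFSG; both trigger the lazy
 * guarded-storage enablement in handle_gs().
 */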
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };
	int rc;
	u8 ar;
	u64 op2;

	vcpu->stat.instruction_sck++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
	kvm_s390_set_tod_clock(vcpu->kvm, &gtod);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}
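/*
 * Storage keys are also handled lazily: as long as a guest never
 * issues a storage-key instruction, ISKE/SSKE/RRBE stay intercepted
 * (or are deferred via the keyless-subset facility, CPUSTAT_KSS).
 * The first use enables keys for the whole mm via s390_enable_skey()
 * and drops the intercepts so SIE can interpret later uses.
 */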
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc;
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	trace_kvm_s390_skey_related_inst(vcpu);
	if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
	    !kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
		return 0;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (rc)
		return rc;

	if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
	sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	return 0;
}
static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (sclp.has_skey) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	return 0;
}
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	unsigned char key;
	int reg1, reg2;
	int rc;

	vcpu->stat.instruction_iske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, addr, &key);
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	int reg1, reg2;
	int rc;

	vcpu->stat.instruction_rrbe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}
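/*
 * The m3 field of SSKE selects the optional extensions: MB (multiple
 * blocks), MR/MC (conditional on reference/change), NQ (non-quiescing).
 * Bits whose backing facility is not offered to the guest are masked
 * off below before the key is set.
 */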
#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	int rc;

	vcpu->stat.instruction_sske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

		if (kvm_is_error_hva(addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);
		up_read(&current->mm->mmap_sem);
		if (rc < 0)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	vcpu->stat.instruction_tb++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}
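/*
 * TPI stores the pending I/O interruption code either at the second
 * operand (two words) or, when the operand address is zero, into the
 * lowcore (three words). If the store fails, the interrupt must be
 * reinjected, because the instruction is suppressed from the guest's
 * point of view.
 */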
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	vcpu->stat.instruction_tpi++;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}
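/*
 * TSCH is only partially handled here: a pending interrupt for the
 * addressed subchannel is dequeued and exposed via the kvm_run block,
 * and the exit code -EREMOTE hands the actual instruction emulation
 * to userspace.
 */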
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	vcpu->stat.instruction_tsch++;

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		vcpu->stat.instruction_io_other++;
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL
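/*
 * is_valid_psw() rejects PSWs that are architecturally invalid: any
 * unassigned mask bit set, an address that exceeds the current
 * addressing mode, the EA bit set without BA (64-bit mode requires
 * both), or an odd instruction address.
 */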
int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpsw++;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpswe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}
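/*
 * insert_stsi_usr_data() copies the STSI operands into the kvm_run
 * block so that userspace can post-process the system information
 * (used when the user_stsi capability is enabled).
 */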
static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x02:
		return handle_stidp(vcpu);
	case 0x04:
		return handle_set_clock(vcpu);
	case 0x10:
		return handle_set_prefix(vcpu);
	case 0x11:
		return handle_store_prefix(vcpu);
	case 0x12:
		return handle_store_cpu_address(vcpu);
	case 0x14:
		return kvm_s390_handle_vsie(vcpu);
	case 0x21:
	case 0x50:
		return handle_ipte_interlock(vcpu);
	case 0x29:
		return handle_iske(vcpu);
	case 0x2a:
		return handle_rrbe(vcpu);
	case 0x2b:
		return handle_sske(vcpu);
	case 0x2c:
		return handle_test_block(vcpu);
	case 0x30:
	case 0x31:
	case 0x32:
	case 0x33:
	case 0x34:
	case 0x35:
	case 0x36:
	case 0x37:
	case 0x38:
	case 0x39:
	case 0x3a:
	case 0x3b:
	case 0x3c:
	case 0x5f:
	case 0x74:
	case 0x76:
		return handle_io_inst(vcpu);
	case 0x56:
		return handle_sthyi(vcpu);
	case 0x7d:
		return handle_stsi(vcpu);
	case 0xb1:
		return handle_stfl(vcpu);
	case 0xb2:
		return handle_lpswe(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	vcpu->stat.instruction_epsw++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}
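/*
 * PFMF (perform frame management function) packs its controls into
 * general register r1: SK (set key), CF (clear frame), UI (usage
 * indication), FSC (frame size code), NQ/MR/MC (key-setting options)
 * and the storage key itself. The masks below decode that layout.
 */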
#define PFMF_RESERVED	0xfffc0101UL
#define PFMF_SK		0x00020000UL
#define PFMF_CF		0x00010000UL
#define PFMF_UI		0x00008000UL
#define PFMF_FSC	0x00007000UL
#define PFMF_NQ		0x00000800UL
#define PFMF_MR		0x00000400UL
#define PFMF_MC		0x00000200UL
#define PFMF_KEY	0x000000feUL
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		break;
	case 0x00001000:
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long useraddr;

		/* Translate guest address to host address */
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, useraddr,
							key, NULL, nq, mr, mc);
			up_read(&current->mm->mmap_sem);
			if (rc < 0)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}
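/*
 * do_essa() performs the host side of one ESSA (extract and set
 * storage attributes) operation: it reads/updates the PGSTE for the
 * page, reports the usage and block-content state in r1, appends the
 * frame to the CBRL list when pages were discarded, and, during
 * migration, marks the page dirty in the CMMA bitmap.
 */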
static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
{
	struct kvm_s390_migration_state *ms = vcpu->kvm->arch.migration_state;
	int r1, r2, nappended, entries;
	unsigned long gfn, hva, res, pgstev, ptev;
	unsigned long *cbrlo;

	/*
	 * We don't need to set SD.FPF.SK to 1 here, because if we have a
	 * machine check here we either handle it or crash
	 */

	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
	hva = gfn_to_hva(vcpu->kvm, gfn);
	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
	if (nappended < 0) {
		res = orc ? 0x10 : 0;
		vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
		return 0;
	}
	res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
	/*
	 * Set the block-content state part of the result. 0 means resident, so
	 * nothing to do if the page is valid. 2 is for preserved pages
	 * (non-present and non-zero), and 3 for zero pages (non-present and
	 * zero).
	 */
	if (ptev & _PAGE_INVALID) {
		res |= 2;
		if (pgstev & _PGSTE_GPS_ZERO)
			res |= 1;
	}
	if (pgstev & _PGSTE_GPS_NODAT)
		res |= 0x20;
	vcpu->run->s.regs.gprs[r1] = res;
	/*
	 * It is possible that all the normal 511 slots were full, in which case
	 * we will now write in the 512th slot, which is reserved for host use.
	 * In both cases we let the normal essa handling code process all the
	 * slots, including the reserved one, if needed.
	 */
	if (nappended > 0) {
		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
		cbrlo[entries] = gfn << PAGE_SHIFT;
	}

	if (orc && gfn < ms->bitmap_size) {
		/* increment only if we are really flipping the bit to 1 */
		if (!test_and_set_bit(gfn, ms->pgste_bitmap))
			atomic64_inc(&ms->dirty_pages);
	}

	return nappended;
}
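/*
 * handle_essa() distinguishes two modes: outside of migration the
 * first intercepted ESSA merely turns on CMMA interpretation in the
 * mm context and the SIE block and retries the instruction; during
 * migration every ESSA is emulated via do_essa() so the CMMA dirty
 * bitmap stays consistent.
 */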
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i, orc;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	/* Check for invalid operation request code */
	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	/* ORCs 0-6 are always valid */
	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
						: ESSA_SET_STABLE_IF_RESIDENT))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (likely(!vcpu->kvm->arch.migration_state)) {
		/*
		 * CMMA is enabled in the KVM settings, but is disabled in
		 * the SIE block and in the mm_context, and we are not doing
		 * a migration. Enable CMMA in the mm_context.
		 * Since we need to take a write lock to write to the context
		 * to avoid races with storage keys handling, we check if the
		 * value really needs to be written to; if the value is
		 * already correct, we do nothing and avoid the lock.
		 */
		if (vcpu->kvm->mm->context.uses_cmm == 0) {
			down_write(&vcpu->kvm->mm->mmap_sem);
			vcpu->kvm->mm->context.uses_cmm = 1;
			up_write(&vcpu->kvm->mm->mmap_sem);
		}
		/*
		 * If we are here, we are supposed to have CMMA enabled in
		 * the SIE block. Enabling CMMA works on a per-CPU basis,
		 * while the context use_cmma flag is per process.
		 * It's possible that the context flag is enabled and the
		 * SIE flag is not, so we set the flag always; if it was
		 * already set, nothing changes, otherwise we enable it
		 * on this CPU too.
		 */
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		/* Retry the ESSA instruction */
		kvm_s390_retry_instr(vcpu);
	} else {
		/* Account for the possible extra cbrl entry */
		i = do_essa(vcpu, orc);
		if (i < 0)
			return i;
		entries += i;
	}
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x8a:
	case 0x8e:
	case 0x8f:
		return handle_ipte_interlock(vcpu);
	case 0x8d:
		return handle_epsw(vcpu);
	case 0xab:
		return handle_essa(vcpu);
	case 0xaf:
		return handle_pfmf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}
static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
	case 0x25:
		return handle_stctg(vcpu);
	case 0x2f:
		return handle_lctlg(vcpu);
	case 0x60:
	case 0x61:
	case 0x62:
		return handle_ri(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x01:
		return handle_tprot(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	vcpu->stat.instruction_sckpf++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}
static int handle_ptff(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ptff++;

	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}
int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x04:
		return handle_ptff(vcpu);
	case 0x07:
		return handle_sckpf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}