/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int handle_ri(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 64)) {
		vcpu->arch.sie_block->ecb3 |= 0x01;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}
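
/*
 * Note on the two handlers above: runtime instrumentation is not
 * emulated. The first intercept lazily sets the interpretive-execution
 * bit in ecb3 and retries the instruction, so SIE handles all further
 * RI instructions without exiting to the host.
 */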

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	int rc;
	ar_t ar;
	u64 op2, val;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
	kvm_s390_set_tod_clock(vcpu->kvm, val);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}
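
/*
 * Note: the guest-access functions (read_guest()/write_guest()) return
 * either a negative kernel error or a positive program-interruption
 * code; kvm_s390_inject_prog_cond() forwards a positive code to the
 * guest as a program interrupt and passes negative errors back to the
 * caller. This pattern recurs throughout the handlers below.
 */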

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}
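
/*
 * The operand is masked with 0x7fffe000u above because the prefix
 * designates an 8k-aligned prefix area and must be a 31-bit address;
 * the low 13 bits and the top bit of the word are not part of it.
 */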

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	trace_kvm_s390_skey_related_inst(vcpu);
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return rc;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (!rc)
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	return rc;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_storage_key++;
	rc = __skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (sclp.has_skey) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	return 0;
}
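
/*
 * Convention used by the storage-key handlers below: -EAGAIN from
 * try_handle_skey() means the instruction was already retried through
 * SIE interpretation, so the callers map it to 0 (handled) instead of
 * reporting an error.
 */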

static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	unsigned char key;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, addr, &key);
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}
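
/*
 * On success, reset_guest_reference_bit() returns the previous
 * reference/change state encoded as a value in 0..3, which is exactly
 * the condition code RRBE has to present, so it is written straight
 * into the guest PSW above.
 */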

#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

		if (kvm_is_error_hva(addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);
		up_read(&current->mm->mmap_sem);
		if (rc < 0)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}
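
/*
 * With the multiple-block (MB) control active, SSKE processes keys up
 * to the next 1MB boundary page by page and reports back the address
 * of the next block in reg2, truncated to the current addressing
 * mode; that is what the reg2 fixup above implements.
 */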

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	ar_t ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}
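
/*
 * The reinject path above preserves TPI's suppression semantics: if
 * storing the interruption code faults, the already dequeued interrupt
 * is put back on the queue, so from the guest's point of view the
 * instruction never (partially) executed.
 */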

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}
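
/*
 * -EREMOTE drops to userspace, which completes the TSCH emulation; the
 * dequeued flag tells it whether a pending I/O interrupt was consumed
 * and may need to be re-injected if the instruction gets a program
 * check.
 */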

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}
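
/*
 * Address-mode summary for the checks above: EA=0/BA=0 is 24-bit (the
 * address must fit in PSW_ADDR_24), EA=0/BA=1 is 31-bit, EA=1/BA=1 is
 * 64-bit, and EA=1/BA=0 is invalid. Odd instruction addresses are
 * rejected as well.
 */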

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	ar_t ar;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	ar_t ar;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	ar_t ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}
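
/*
 * With user_stsi set, the system-information page is still written by
 * the kernel, but -EREMOTE hands control to userspace so it can
 * post-process the data; fc, sel1 and sel2 are passed along via the
 * run struct by insert_stsi_usr_data() above.
 */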

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x14] = kvm_s390_handle_vsie,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_iske,
	[0x2a] = handle_rrbe,
	[0x2b] = handle_sske,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel;
	 * anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}
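
/*
 * Dispatch pattern used here and for the B9/EB/01 tables below: the
 * low byte of the intercepted opcode indexes a 256-entry table, and a
 * NULL slot means the instruction is left to userspace (-EOPNOTSUPP).
 */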

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED	0xfffc0101UL
#define PFMF_SK		0x00020000UL
#define PFMF_CF		0x00010000UL
#define PFMF_UI		0x00008000UL
#define PFMF_FSC	0x00007000UL
#define PFMF_NQ		0x00000800UL
#define PFMF_MR		0x00000400UL
#define PFMF_MC		0x00000200UL
#define PFMF_KEY	0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long useraddr;

		/* Translate guest address to host address */
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = __skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, useraddr,
							key, NULL, nq, mr, mc);
			up_read(&current->mm->mmap_sem);
			if (rc < 0)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}
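
/*
 * The FSC field selects the frame size PFMF operates on: 4K, 1M, or 2G
 * (the latter only with EDAT2, facility 78). "end" is the start
 * address rounded up to the next frame boundary, and the loop above
 * works through the frame in 4K steps.
 */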

static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Retry the ESSA instruction */
	kvm_s390_retry_instr(vcpu);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}
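
/*
 * ESSA itself is interpreted by SIE; this handler only post-processes
 * the CMMA block list collected in the cbrlo page: each entry names a
 * guest block whose host backing can be zapped via __gmap_zap().
 */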

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}
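
/*
 * ((reg3 - reg1) & 0xf) + 1 handles register wraparound: e.g. LCTL
 * with r1=14, r3=1 loads the four registers 14, 15, 0, 1, since
 * ((1 - 14) & 0xf) + 1 = 4. The same arithmetic is used by the
 * STCTL/LCTLG/STCTG handlers below.
 */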

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	ar_t ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}
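
/*
 * Only access key 0 (the Linux memory-detection case) is handled in
 * the kernel; cc0 means write access is allowed, cc1 read-only, and
 * cc2 (storage-key protection) never occurs here since storage keys
 * are not checked yet. All other keys go to userspace.
 */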

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x04] = handle_ptff,
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}