// SPDX-License-Identifier: GPL-2.0
/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2014
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>
#include <asm/asm-offsets.h>
#include <asm/irq.h>
#include <asm/sysinfo.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"
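
/*
 * kvm_s390_get_ilen - get the length of the intercepted instruction
 *
 * Returns the instruction length in bytes (2, 4 or 6, or 0 if no
 * instruction is available for this intercept), taken either from the
 * stored opcode or, for program interceptions, from the ILC that the
 * hardware recorded.
 */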
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
	u8 ilen = 0;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* instruction only stored for these icptcodes */
		ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
		/* Use the length of the EXECUTE instruction if necessary */
		if (sie_block->icptstatus & 1) {
			ilen = (sie_block->icptstatus >> 4) & 0x6;
			if (!ilen)
				ilen = 4;
		}
		break;
	case ICPT_PROGI:
		/* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
		ilen = vcpu->arch.sie_block->pgmilc & 0x6;
		break;
	}
	return ilen;
}
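
/*
 * Handle a stop-request intercept: delay the stop while non-stop
 * interrupts are still pending, store the CPU status if the SIGP STOP
 * request asked for it, and finally put the VCPU into stopped state.
 */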
static int handle_stop(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	uint8_t flags, stop_pending;

	vcpu->stat.exit_stop_request++;

	/* delay the stop if any non-stop irq is pending */
	if (kvm_s390_vcpu_has_irq(vcpu, 1))
		return 0;

	/* avoid races with the injection/SIGP STOP code */
	spin_lock(&li->lock);
	flags = li->irq.stop.flags;
	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
	spin_unlock(&li->lock);

	trace_kvm_s390_stop_request(stop_pending, flags);
	if (!stop_pending)
		return 0;

	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc)
			return rc;
	}

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	return -EOPNOTSUPP;
}
static int handle_validity(struct kvm_vcpu *vcpu)
{
	int viwhy = vcpu->arch.sie_block->ipb >> 16;

	vcpu->stat.exit_validity++;
	trace_kvm_s390_intercept_validity(vcpu, viwhy);
	KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
		  current->pid, vcpu->kvm);

	/* do not warn on invalid runtime instrumentation mode */
	WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
		  viwhy);
	return -EINVAL;
}
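
/*
 * Dispatch an instruction intercept to the emulation handler matching
 * the first opcode byte; opcodes without a handler are left to
 * userspace via -EOPNOTSUPP.
 */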
static int handle_instruction(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_instruction++;
	trace_kvm_s390_intercept_instruction(vcpu,
					     vcpu->arch.sie_block->ipa,
					     vcpu->arch.sie_block->ipb);

	switch (vcpu->arch.sie_block->ipa >> 8) {
	case 0x01:
		return kvm_s390_handle_01(vcpu);
	case 0x82:
		return kvm_s390_handle_lpsw(vcpu);
	case 0x83:
		return kvm_s390_handle_diag(vcpu);
	case 0xaa:
		return kvm_s390_handle_aa(vcpu);
	case 0xae:
		return kvm_s390_handle_sigp(vcpu);
	case 0xb2:
		return kvm_s390_handle_b2(vcpu);
	case 0xb6:
		return kvm_s390_handle_stctl(vcpu);
	case 0xb7:
		return kvm_s390_handle_lctl(vcpu);
	case 0xb9:
		return kvm_s390_handle_b9(vcpu);
	case 0xe3:
		return kvm_s390_handle_e3(vcpu);
	case 0xe5:
		return kvm_s390_handle_e5(vcpu);
	case 0xeb:
		return kvm_s390_handle_eb(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}
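
/*
 * Forward an intercepted program interrupt to the guest, copying the
 * auxiliary fields that belong to the specific exception (translation
 * exception code, access ids, monitor and PER data) out of the SIE
 * block into the interrupt payload.
 */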
static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = vcpu->arch.sie_block->iprcc,
		/* the PSW has already been rewound */
		.flags = KVM_S390_PGM_FLAGS_NO_REWIND,
	};

	switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		pgm_info.op_access_id = vcpu->arch.sie_block->oai;
		break;
	case PGM_MONITOR:
		pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
		pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
		break;
	case PGM_PROTECTION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	default:
		break;
	}

	if (vcpu->arch.sie_block->iprcc & PGM_PER) {
		pgm_info.per_code = vcpu->arch.sie_block->perc;
		pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
		pgm_info.per_address = vcpu->arch.sie_block->peraddr;
		pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
	}
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
/*
 * restore ITDB to program-interruption TDB in guest lowcore
 * and set TX abort indication if required
 */
static int handle_itdb(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_itdb *itdb;
	int rc;

	if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
		return 0;
	if (current->thread.per_flags & PER_FLAG_NO_TE)
		return 0;
	itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
	rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
	if (rc)
		return rc;
	memset(itdb, 0, sizeof(*itdb));

	return 0;
}
#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)
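
/*
 * A program interrupt intercept is first run through the guest-debug
 * PER filter; whatever remains is reflected to the guest after the
 * transaction diagnostic block has been saved to the guest lowcore.
 */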
static int handle_prog(struct kvm_vcpu *vcpu)
{
	psw_t psw;
	int rc;

	vcpu->stat.exit_program_interruption++;

	if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
		rc = kvm_s390_handle_per_event(vcpu);
		if (rc)
			return rc;
		/* the interrupt might have been filtered out completely */
		if (vcpu->arch.sie_block->iprcc == 0)
			return 0;
	}

	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
	if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
		rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
		if (rc)
			return rc;
		/* Avoid endless loops of specification exceptions */
		if (!is_valid_psw(&psw))
			return -EOPNOTSUPP;
	}

	rc = handle_itdb(vcpu);
	if (rc)
		return rc;

	return inject_prog_on_prog_intercept(vcpu);
}
/*
 * handle_external_interrupt - used for external interruption interceptions
 *
 * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
 * the new PSW does not have external interrupts disabled. In the first case,
 * we've got to deliver the interrupt manually, and in the second case, we
 * drop to userspace to handle the situation there.
 */
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	u16 eic = vcpu->arch.sie_block->eic;
	struct kvm_s390_irq irq;
	psw_t newpsw;
	int rc;

	vcpu->stat.exit_external_interrupt++;

	rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
	if (rc)
		return rc;
	/* We cannot handle clock comparator or timer interrupt with bad PSW */
	if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
	    (newpsw.mask & PSW_MASK_EXT))
		return -EOPNOTSUPP;

	switch (eic) {
	case EXT_IRQ_CLK_COMP:
		irq.type = KVM_S390_INT_CLOCK_COMP;
		break;
	case EXT_IRQ_CPU_TIMER:
		irq.type = KVM_S390_INT_CPU_TIMER;
		break;
	case EXT_IRQ_EXTERNAL_CALL:
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
		/* ignore if another external call is already pending */
		if (rc == -EBUSY)
			return 0;
		return rc;
	default:
		return -EOPNOTSUPP;
	}

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
/*
 * Handle MOVE PAGE partial execution interception.
 *
 * This interception can only happen for guests with DAT disabled and
 * addresses that are currently not mapped in the host. Thus we try to
 * set up the mappings for the corresponding user pages here (or throw
 * addressing exceptions in case of illegal guest addresses).
 */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
	unsigned long srcaddr, dstaddr;
	int reg1, reg2, rc;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* Make sure that the source is paged-in */
	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
				     reg2, &srcaddr, GACC_FETCH);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
	if (rc != 0)
		return rc;

	/* Make sure that the destination is paged-in */
	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
				     reg1, &dstaddr, GACC_STORE);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
	if (rc != 0)
		return rc;

	kvm_s390_retry_instr(vcpu);

	return 0;
}
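
/*
 * Partial execution intercepts are raised when the hardware needs host
 * assistance to complete an instruction; only MVPG and SIGP are
 * handled here, anything else falls through to userspace.
 */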
static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_pei++;

	if (vcpu->arch.sie_block->ipa == 0xb254)	/* MVPG */
		return handle_mvpg_pei(vcpu);
	if (vcpu->arch.sie_block->ipa >> 8 == 0xae)	/* SIGP */
		return kvm_s390_handle_sigp_pei(vcpu);

	return -EOPNOTSUPP;
}
/*
 * Handle the sthyi instruction that provides the guest with system
 * information, like current CPU resources available at each level of
 * the machine.
 */
int handle_sthyi(struct kvm_vcpu *vcpu)
{
	int reg1, reg2, r = 0;
	u64 code, addr, cc = 0, rc = 0;
	struct sthyi_sctns *sctns = NULL;

	if (!test_kvm_facility(vcpu->kvm, 74))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
	code = vcpu->run->s.regs.gprs[reg1];
	addr = vcpu->run->s.regs.gprs[reg2];

	vcpu->stat.instruction_sthyi++;
	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
	trace_kvm_s390_handle_sthyi(vcpu, code, addr);

	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (code & 0xffff) {
		cc = 3;
		rc = 4;
		goto out;
	}

	if (addr & ~PAGE_MASK)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	sctns = (void *)get_zeroed_page(GFP_KERNEL);
	if (!sctns)
		return -ENOMEM;

	cc = sthyi_fill(sctns, &rc);

out:
	if (!cc) {
		r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
		if (r) {
			free_page((unsigned long)sctns);
			return kvm_s390_inject_prog_cond(vcpu, r);
		}
	}

	free_page((unsigned long)sctns);
	vcpu->run->s.regs.gprs[reg2 + 1] = rc;
	kvm_s390_set_psw_cc(vcpu, cc);
	return r;
}
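
/*
 * Operation exceptions are intercepted for opcodes the hardware does
 * not implement: STHYI is emulated in the kernel, an all-zero opcode
 * can be given to userspace if requested, and everything else is
 * reflected to the guest, guarding against obvious reflection loops.
 */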
static int handle_operexc(struct kvm_vcpu *vcpu)
{
	psw_t oldpsw, newpsw;
	int rc;

	vcpu->stat.exit_operation_exception++;
	trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
				      vcpu->arch.sie_block->ipb);

	if (vcpu->arch.sie_block->ipa == 0xb256)
		return handle_sthyi(vcpu);

	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
		return -EOPNOTSUPP;
	rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
	if (rc)
		return rc;
	/*
	 * Avoid endless loops of operation exceptions, if the pgm new
	 * PSW will cause a new operation exception.
	 * The heuristic checks if the pgm new psw is within 6 bytes before
	 * the faulting psw address (with same DAT, AS settings) and the
	 * new psw is not a wait psw and the fault was not triggered by
	 * problem state.
	 */
	oldpsw = vcpu->arch.sie_block->gpsw;
	if (oldpsw.addr - newpsw.addr <= 6 &&
	    !(newpsw.mask & PSW_MASK_WAIT) &&
	    !(oldpsw.mask & PSW_MASK_PSTATE) &&
	    (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
	    (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
		return -EOPNOTSUPP;

	return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
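
/*
 * kvm_handle_sie_intercept - dispatch an exit from the SIE instruction
 *
 * Returns 0 if the intercept was handled completely in the kernel, a
 * negative error code otherwise; -EOPNOTSUPP signals that the exit has
 * to be processed by userspace.
 */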
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	int rc, per_rc = 0;

	if (kvm_is_ucontrol(vcpu->kvm))
		return -EOPNOTSUPP;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_EXTREQ:
		vcpu->stat.exit_external_request++;
		return 0;
	case ICPT_IOREQ:
		vcpu->stat.exit_io_request++;
		return 0;
	case ICPT_INST:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PROGI:
		return handle_prog(vcpu);
	case ICPT_EXTINT:
		return handle_external_interrupt(vcpu);
	case ICPT_WAIT:
		return kvm_s390_handle_wait(vcpu);
	case ICPT_VALIDITY:
		return handle_validity(vcpu);
	case ICPT_STOP:
		return handle_stop(vcpu);
	case ICPT_OPEREXC:
		rc = handle_operexc(vcpu);
		break;
	case ICPT_PARTEXEC:
		rc = handle_partial_execution(vcpu);
		break;
	case ICPT_KSS:
		rc = kvm_s390_skey_check_enable(vcpu);
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* process PER, also if the instruction is processed in user space */
	if (vcpu->arch.sie_block->icptstatus & 0x02 &&
	    (!rc || rc == -EOPNOTSUPP))
		per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
	return per_rc ? per_rc : rc;
}