// SPDX-License-Identifier: GPL-2.0
/*
 * kvm nested virtualization support for s390x
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>

#include <asm/gmap.h>
#include <asm/mmu_context.h>
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
#include "kvm-s390.h"
#include "gaccess.h"

struct vsie_page {
	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
	/*
	 * the backup info for machine check. ensure it's at
	 * the same offset as that in struct sie_page!
	 */
	struct mcck_volatile_info mcck_info;	/* 0x0200 */
	/*
	 * The pinned original scb. Be aware that other VCPUs can modify
	 * it while we read from it. Values that are used for conditions or
	 * are reused conditionally, should be accessed via READ_ONCE.
	 */
	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
	/* the shadow gmap in use by the vsie_page */
	struct gmap *gmap;			/* 0x0220 */
	/* address of the last reported fault to guest2 */
	unsigned long fault_addr;		/* 0x0228 */
	/* calculated guest addresses of satellite control blocks */
	gpa_t sca_gpa;				/* 0x0230 */
	gpa_t itdba_gpa;			/* 0x0238 */
	gpa_t gvrd_gpa;				/* 0x0240 */
	gpa_t riccbd_gpa;			/* 0x0248 */
	gpa_t sdnx_gpa;				/* 0x0250 */
	__u8 reserved[0x0700 - 0x0258];		/* 0x0258 */
	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
};

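/*
 * Note: struct vsie_page is sized and laid out to fill exactly one page;
 * kvm_s390_handle_vsie() below contains a
 * BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE), and the offset
 * comments on the members document that fixed layout.
 */
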
/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
			     __u16 reason_code)
{
	scb->ipa = 0x1000;
	scb->ipb = ((__u32) reason_code) << 16;
	scb->icptcode = ICPT_VALIDITY;
	return 1;
}

/* mark the prefix as unmapped, this will block the VSIE */
static void prefix_unmapped(struct vsie_page *vsie_page)
{
	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* mark the prefix as unmapped and wait until the VSIE has been left */
static void prefix_unmapped_sync(struct vsie_page *vsie_page)
{
	prefix_unmapped(vsie_page);
	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* mark the prefix as mapped, this will allow the VSIE to run */
static void prefix_mapped(struct vsie_page *vsie_page)
{
	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* test if the prefix is mapped into the gmap shadow */
static int prefix_is_mapped(struct vsie_page *vsie_page)
{
	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
}

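/*
 * Note: the prefix state is tracked via PROG_REQUEST in prog20. While the
 * bit is set, the shadow SCB must not (re)enter SIE; prefix_unmapped_sync()
 * additionally kicks a currently running vSIE via CPUSTAT_STOP_INT and
 * busy-waits until PROG_IN_SIE is cleared in prog0c.
 */
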
/* copy the updated intervention request bits into the shadow scb */
static void update_intervention_requests(struct vsie_page *vsie_page)
{
	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
	int cpuflags;

	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
}

/* shadow (filter and validate) the cpuflags */
static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);

	/* we don't allow ESA/390 guests */
	if (!(cpuflags & CPUSTAT_ZARCH))
		return set_validity_icpt(scb_s, 0x0001U);

	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
		return set_validity_icpt(scb_s, 0x0001U);
	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
		return set_validity_icpt(scb_s, 0x0007U);

	/* intervention requests will be set later */
	newflags = CPUSTAT_ZARCH;
	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
		newflags |= CPUSTAT_GED;
	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
		if (cpuflags & CPUSTAT_GED)
			return set_validity_icpt(scb_s, 0x0001U);
		newflags |= CPUSTAT_GED2;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
		newflags |= cpuflags & CPUSTAT_P;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
		newflags |= cpuflags & CPUSTAT_SM;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
		newflags |= cpuflags & CPUSTAT_IBS;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
		newflags |= cpuflags & CPUSTAT_KSS;

	atomic_set(&scb_s->cpuflags, newflags);
	return 0;
}

/* Copy to APCB FORMAT1 from APCB FORMAT0 */
static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
			unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
{
	struct kvm_s390_apcb0 tmp;

	if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
		return -EFAULT;

	apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
	apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
	apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;

	return 0;
}

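/*
 * Note: when converting a FORMAT0 APCB into the FORMAT1 shadow, only the
 * first 64-bit word of each mask is populated; the 0xffff000000000000UL
 * masks keep only the first 16 bits of AQM/ADM, matching what a FORMAT0
 * CRYCB can address.
 */
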
/**
 * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @apcb_o: pointer to start of original apcb in the guest2
 * @apcb_h: pointer to start of apcb in the guest1
 *
 * Returns 0 on success, -EFAULT on error reading the guest apcb
 */
static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
			unsigned long apcb_o, unsigned long *apcb_h)
{
	if (read_guest_real(vcpu, apcb_o, apcb_s,
			    sizeof(struct kvm_s390_apcb0)))
		return -EFAULT;

	/* bitmap_and() takes the length in bits, not bytes */
	bitmap_and(apcb_s, apcb_s, apcb_h,
		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));

	return 0;
}

/**
 * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @apcb_o: pointer to start of original guest apcb
 * @apcb_h: pointer to start of apcb in the host
 *
 * Returns 0 on success, -EFAULT on error reading the guest apcb
 */
static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
			unsigned long apcb_o,
			unsigned long *apcb_h)
{
	if (read_guest_real(vcpu, apcb_o, apcb_s,
			    sizeof(struct kvm_s390_apcb1)))
		return -EFAULT;

	/* bitmap_and() takes the length in bits, not bytes */
	bitmap_and(apcb_s, apcb_s, apcb_h,
		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));

	return 0;
}

/**
 * setup_apcb - Create a shadow copy of the apcb.
 * @vcpu: pointer to the virtual CPU
 * @crycb_s: pointer to shadow crycb
 * @crycb_o: pointer to original guest crycb
 * @crycb_h: pointer to the host crycb
 * @fmt_o: format of the original guest crycb.
 * @fmt_h: format of the host crycb.
 *
 * Checks the compatibility between the guest and host crycb and calls the
 * appropriate copy function.
 *
 * Return 0 or an error number if the guest and host crycb are incompatible.
 */
static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
		      const u32 crycb_o,
		      struct kvm_s390_crypto_cb *crycb_h,
		      int fmt_o, int fmt_h)
{
	struct kvm_s390_crypto_cb *crycb;

	crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;

	switch (fmt_o) {
	case CRYCB_FORMAT2:
		if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
			return -EACCES;
		if (fmt_h != CRYCB_FORMAT2)
			return -EINVAL;
		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
				    (unsigned long) &crycb->apcb1,
				    (unsigned long *)&crycb_h->apcb1);
	case CRYCB_FORMAT1:
		switch (fmt_h) {
		case CRYCB_FORMAT2:
			return setup_apcb10(vcpu, &crycb_s->apcb1,
					    (unsigned long) &crycb->apcb0,
					    &crycb_h->apcb1);
		case CRYCB_FORMAT1:
			return setup_apcb00(vcpu,
					    (unsigned long *) &crycb_s->apcb0,
					    (unsigned long) &crycb->apcb0,
					    (unsigned long *) &crycb_h->apcb0);
		}
		break;
	case CRYCB_FORMAT0:
		if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
			return -EACCES;

		switch (fmt_h) {
		case CRYCB_FORMAT2:
			return setup_apcb10(vcpu, &crycb_s->apcb1,
					    (unsigned long) &crycb->apcb0,
					    &crycb_h->apcb1);
		case CRYCB_FORMAT1:
		case CRYCB_FORMAT0:
			return setup_apcb00(vcpu,
					    (unsigned long *) &crycb_s->apcb0,
					    (unsigned long) &crycb->apcb0,
					    (unsigned long *) &crycb_h->apcb0);
		}
	}
	return -EINVAL;
}

/**
 * shadow_crycb - Create a shadow copy of the crycb block
 * @vcpu: a pointer to the virtual CPU
 * @vsie_page: a pointer to internal data used for the vSIE
 *
 * Create a shadow copy of the crycb block and setup key wrapping, if
 * requested for guest 3 and enabled for guest 2.
 *
 * We accept format-1 or format-2, but we convert format-1 into format-2
 * in the shadow CRYCB.
 * Using format-2 enables the firmware to choose the right format when
 * scheduling the SIE.
 * There is nothing to do for format-0.
 *
 * This function centralizes the issuing of set_validity_icpt() for all
 * the subfunctions working on the crycb.
 *
 * Returns: - 0 if shadowed or nothing to do
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
	unsigned long *b1, *b2;
	u8 ecb3_flags;
	u32 ecd_flags;
	int apie_h;
	int apie_s;
	int key_msk = test_kvm_facility(vcpu->kvm, 76);
	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
	int ret = 0;

	scb_s->crycbd = 0;

	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
	apie_s = apie_h & scb_o->eca;
	if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
		return 0;

	if (!crycb_addr)
		return set_validity_icpt(scb_s, 0x0039U);

	if (fmt_o == CRYCB_FORMAT1)
		if ((crycb_addr & PAGE_MASK) !=
		    ((crycb_addr + 128) & PAGE_MASK))
			return set_validity_icpt(scb_s, 0x003CU);

	if (apie_s) {
		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
				 vcpu->kvm->arch.crypto.crycb,
				 fmt_o, fmt_h);
		if (ret)
			goto end;
		scb_s->eca |= scb_o->eca & ECA_APIE;
	}

	/* we may only allow it if enabled for guest 2 */
	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
		     (ECB3_AES | ECB3_DEA);
	ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
	if (!ecb3_flags && !ecd_flags)
		goto end;

	/* copy only the wrapping keys */
	if (read_guest_real(vcpu, crycb_addr + 72,
			    vsie_page->crycb.dea_wrapping_key_mask, 56))
		return set_validity_icpt(scb_s, 0x0035U);

	scb_s->ecb3 |= ecb3_flags;
	scb_s->ecd |= ecd_flags;

	/* xor both blocks in one run */
	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
	b2 = (unsigned long *)
			    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);

end:
	switch (ret) {
	case -EINVAL:
		return set_validity_icpt(scb_s, 0x0022U);
	case -EFAULT:
		return set_validity_icpt(scb_s, 0x0035U);
	case -EACCES:
		return set_validity_icpt(scb_s, 0x003CU);
	}
	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
	return 0;
}

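/*
 * Note: scb_s->crycbd is a 31-bit origin plus format bits, so the shadow
 * CRYCB embedded in the vsie_page must lie below 2 GB; the result is always
 * tagged CRYCB_FORMAT2, as described in the function comment above.
 */
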
/* shadow (round up/down) the ibc to avoid validity icpt */
static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_ibc = scb_o->ibc;
	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;

	scb_s->ibc = 0;
	/* ibc installed in g2 and requested for g3 */
	if (vcpu->kvm->arch.model.ibc && new_ibc) {
		scb_s->ibc = new_ibc;
		/* take care of the minimum ibc level of the machine */
		if (scb_s->ibc < min_ibc)
			scb_s->ibc = min_ibc;
		/* take care of the maximum ibc level set for the guest */
		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
			scb_s->ibc = vcpu->kvm->arch.model.ibc;
	}
}

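/*
 * Note: the requested IBC is clamped to the range [machine minimum from
 * SCLP, IBC configured for guest 2], so SIE never sees an IBC value that the
 * hardware or the guest-2 CPU model cannot back.
 */
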
/* unshadow the scb, copying parameters back to the real scb */
static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;

	scb_o->icptcode = scb_s->icptcode;
	scb_o->icptstatus = scb_s->icptstatus;
	scb_o->ipa = scb_s->ipa;
	scb_o->ipb = scb_s->ipb;
	scb_o->gbea = scb_s->gbea;

	scb_o->cputm = scb_s->cputm;
	scb_o->ckc = scb_s->ckc;
	scb_o->todpr = scb_s->todpr;

	scb_o->gpsw = scb_s->gpsw;
	scb_o->gg14 = scb_s->gg14;
	scb_o->gg15 = scb_s->gg15;
	memcpy(scb_o->gcr, scb_s->gcr, 128);
	scb_o->pp = scb_s->pp;

	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82)) {
		scb_o->fpf &= ~FPF_BPBC;
		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
	}

	/* interrupt intercept */
	switch (scb_s->icptcode) {
	case ICPT_PROGI:
	case ICPT_INSTPROGI:
	case ICPT_EXTINT:
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
		break;
	case ICPT_PARTEXEC:
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
		break;
	}

	if (scb_s->ihcpu != 0xffffU)
		scb_o->ihcpu = scb_s->ihcpu;
}

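/*
 * Note: the two memcpy() calls above copy the per-interception parameter
 * bytes of the SCB (0xc0-0xef resp. 0xc0-0xcf) back into guest 2's SCB, so
 * guest 2 sees the same intercept data a real SIE exit would have produced.
 */
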
/*
 * Setup the shadow scb by copying and checking the relevant parts of the g2
 * provided scb.
 *
 * Returns: - 0 if the scb has been shadowed
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_prefix = scb_o->prefix;
	const uint32_t new_prefix = READ_ONCE(__new_prefix);
	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
	bool had_tx = scb_s->ecb & ECB_TE;
	unsigned long new_mso = 0;
	int rc;

	/* make sure we don't have any leftovers when reusing the scb */
	scb_s->icptcode = 0;
	scb_s->eca = 0;
	scb_s->ecb = 0;
	scb_s->ecb2 = 0;
	scb_s->ecb3 = 0;
	scb_s->ecd = 0;
	scb_s->fac = 0;
	scb_s->fpf = 0;

	rc = prepare_cpuflags(vcpu, vsie_page);
	if (rc)
		goto out;

	scb_s->cputm = scb_o->cputm;
	scb_s->ckc = scb_o->ckc;
	scb_s->todpr = scb_o->todpr;
	scb_s->epoch = scb_o->epoch;

	scb_s->gpsw = scb_o->gpsw;
	scb_s->gg14 = scb_o->gg14;
	scb_s->gg15 = scb_o->gg15;
	memcpy(scb_s->gcr, scb_o->gcr, 128);
	scb_s->pp = scb_o->pp;

	/* interception / execution handling */
	scb_s->gbea = scb_o->gbea;
	scb_s->lctl = scb_o->lctl;
	scb_s->svcc = scb_o->svcc;
	scb_s->ictl = scb_o->ictl;
	/*
	 * SKEY handling functions can't deal with false setting of PTE invalid
	 * bits. Therefore we cannot provide interpretation and would later
	 * have to provide own emulation handlers.
	 */
	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	scb_s->icpua = scb_o->icpua;

	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
	/* if the hva of the prefix changes, we have to remap the prefix */
	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
		prefix_unmapped(vsie_page);
	/* SIE will do mso/msl validity and exception checks for us */
	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
	scb_s->mso = new_mso;
	scb_s->prefix = new_prefix;

	/* We have to definitely flush the tlb if this scb never ran */
	if (scb_s->ihcpu != 0xffffU)
		scb_s->ihcpu = scb_o->ihcpu;

	/* MVPG and Protection Exception Interpretation are always available */
	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
	/* Host-protection-interruption introduced with ESOP */
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
	/* transactional execution */
	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
		/* remap the prefix if tx is toggled on */
		if (!had_tx)
			prefix_unmapped(vsie_page);
		scb_s->ecb |= ECB_TE;
	}
	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82))
		scb_s->fpf |= scb_o->fpf & FPF_BPBC;

	if (test_kvm_facility(vcpu->kvm, 129)) {
		scb_s->eca |= scb_o->eca & ECA_VX;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	/* Run-time-Instrumentation */
	if (test_kvm_facility(vcpu->kvm, 64))
		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
	/* Instruction Execution Prevention */
	if (test_kvm_facility(vcpu->kvm, 130))
		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
	/* Guarded Storage */
	if (test_kvm_facility(vcpu->kvm, 133)) {
		scb_s->ecb |= scb_o->ecb & ECB_GS;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
		scb_s->eca |= scb_o->eca & ECA_SII;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
		scb_s->eca |= scb_o->eca & ECA_IB;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
		scb_s->eca |= scb_o->eca & ECA_CEI;
	/* Epoch Extension */
	if (test_kvm_facility(vcpu->kvm, 139))
		scb_s->ecd |= scb_o->ecd & ECD_MEF;

	if (test_kvm_facility(vcpu->kvm, 156))
		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;

	scb_s->hpid = HPID_VSIE;

	prepare_ibc(vcpu, vsie_page);
	rc = shadow_crycb(vcpu, vsie_page);
out:
	if (rc)
		unshadow_scb(vcpu, vsie_page);
	return rc;
}

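/*
 * Note: every ECA/ECB/ECD bit above is only copied from the guest-2 SCB when
 * the corresponding facility or CPU feature is available to guest 2, so
 * guest 3 can never be offered more than guest 2 itself has.
 */
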
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct vsie_page *cur;
	unsigned long prefix;
	struct page *page;
	int i;

	if (!gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;

	/*
	 * Only new shadow blocks are added to the list during runtime,
	 * therefore we can safely reference them all the time.
	 */
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = READ_ONCE(kvm->arch.vsie.pages[i]);
		if (!page)
			continue;
		cur = page_to_virt(page);
		if (READ_ONCE(cur->gmap) != gmap)
			continue;
		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
		/* with mso/msl, the prefix lies at an offset */
		prefix += cur->scb_s.mso;
		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
			prefix_unmapped_sync(cur);
	}
}

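/*
 * Note: this notifier is called when mappings of a shadow gmap are
 * invalidated; only overlaps with the two shadowed prefix pages (offset by
 * mso) matter here, everything else is simply re-faulted on the next run.
 */
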
/*
 * Map the first prefix page and if tx is enabled also the second prefix page.
 *
 * The prefix will be protected, a gmap notifier will inform about unmaps.
 * The shadow scb must not be executed until the prefix is remapped, this is
 * guaranteed by properly handling PROG_REQUEST.
 *
 * Returns: - 0 if successfully mapped or already mapped
 *          - > 0 if control has to be given to guest 2
 *          - -EAGAIN if the caller can retry immediately
 *          - -ENOMEM if out of memory
 */
static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
	int rc;

	if (prefix_is_mapped(vsie_page))
		return 0;

	/* mark it as mapped so we can catch any concurrent unmappers */
	prefix_mapped(vsie_page);

	/* with mso/msl, the prefix lies at offset *mso* */
	prefix += scb_s->mso;

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
	if (!rc && (scb_s->ecb & ECB_TE))
		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
					   prefix + PAGE_SIZE);
	/*
	 * We don't have to mprotect, we will be called for all unshadows.
	 * SIE will detect if protection applies and trigger a validity.
	 */
	if (rc)
		prefix_unmapped(vsie_page);
	if (rc > 0 || rc == -EFAULT)
		rc = set_validity_icpt(scb_s, 0x0037U);
	return rc;
}

/*
 * Pin the guest page given by gpa and set hpa to the pinned host address.
 * Will always be pinned writable.
 *
 * Returns: - 0 on success
 *          - -EINVAL if the gpa is not valid guest storage
 */
static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
{
	struct page *page;

	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
	if (is_error_page(page))
		return -EINVAL;
	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
	return 0;
}

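/*
 * Note: gfn_to_page() takes a reference on the page, so the pinned mapping
 * stays valid until unpin_guest_page() drops it via kvm_release_pfn_dirty().
 */
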
/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
{
	kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
	/* mark the page always as dirty for migration */
	mark_page_dirty(kvm, gpa_to_gfn(gpa));
}

/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;

	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
		vsie_page->sca_gpa = 0;
		scb_s->scaol = 0;
		scb_s->scaoh = 0;
	}

	hpa = scb_s->itdba;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
		vsie_page->itdba_gpa = 0;
		scb_s->itdba = 0;
	}

	hpa = scb_s->gvrd;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
		vsie_page->gvrd_gpa = 0;
		scb_s->gvrd = 0;
	}

	hpa = scb_s->riccbd;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
		vsie_page->riccbd_gpa = 0;
		scb_s->riccbd = 0;
	}

	hpa = scb_s->sdnxo;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
		vsie_page->sdnx_gpa = 0;
		scb_s->sdnxo = 0;
	}
}

/*
 * Instead of shadowing some blocks, we can simply forward them because the
 * addresses in the scb are 64 bit long.
 *
 * This works as long as the data lies in one page. If blocks ever exceed one
 * page, we have to fall back to shadowing.
 *
 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 *
 * Returns: - 0 if all blocks were pinned.
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;
	int rc = 0;

	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
	if (gpa) {
		if (gpa < 2 * PAGE_SIZE)
			rc = set_validity_icpt(scb_s, 0x0038U);
		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
			rc = set_validity_icpt(scb_s, 0x0011U);
		else if ((gpa & PAGE_MASK) !=
			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
			rc = set_validity_icpt(scb_s, 0x003bU);
		if (!rc) {
			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
			if (rc)
				rc = set_validity_icpt(scb_s, 0x0034U);
		}
		if (rc)
			goto unpin;
		vsie_page->sca_gpa = gpa;
		scb_s->scaoh = (u32)((u64)hpa >> 32);
		scb_s->scaol = (u32)(u64)hpa;
	}

	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
	if (gpa && (scb_s->ecb & ECB_TE)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		/* 256 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		vsie_page->itdba_gpa = gpa;
		scb_s->itdba = hpa;
	}

	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		/*
		 * 512 bytes vector registers cannot cross page boundaries
		 * if this block gets bigger, we have to shadow it.
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		vsie_page->gvrd_gpa = gpa;
		scb_s->gvrd = hpa;
	}

	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* 64 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* Validity 0x0044 will be checked by SIE */
		vsie_page->riccbd_gpa = gpa;
		scb_s->riccbd = hpa;
	}

	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
	    (scb_s->ecd & ECD_ETOKENF)) {
		unsigned long sdnxc;

		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
		if (!gpa || gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		if (sdnxc < 6 || sdnxc > 12) {
			rc = set_validity_icpt(scb_s, 0x10b1U);
			goto unpin;
		}
		if (gpa & ((1 << sdnxc) - 1)) {
			rc = set_validity_icpt(scb_s, 0x10b2U);
			goto unpin;
		}
		/*
		 * Due to alignment rules (checked above) this cannot
		 * cross page boundaries
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		vsie_page->sdnx_gpa = gpa;
		scb_s->sdnxo = hpa | sdnxc;
	}
	return 0;
unpin:
	unpin_blocks(vcpu, vsie_page);
	return rc;
}

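/*
 * Note: the "gpa < 2 * PAGE_SIZE" checks above reject satellite blocks placed
 * in the first two pages of guest-2 memory, and the alignment checks ensure
 * that none of the forwarded blocks ever crosses a page boundary.
 */
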
/* unpin the scb provided by guest 2, marking it as dirty */
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		      gpa_t gpa)
{
	hpa_t hpa = (hpa_t) vsie_page->scb_o;

	if (hpa)
		unpin_guest_page(vcpu->kvm, gpa, hpa);
	vsie_page->scb_o = NULL;
}

/*
 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 *
 * Returns: - 0 if the scb was pinned.
 *          - > 0 if control has to be given to guest 2
 */
static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		   gpa_t gpa)
{
	hpa_t hpa;
	int rc;

	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
	if (rc) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		WARN_ON_ONCE(rc);
		return 1;
	}
	vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
	return 0;
}

/*
 * Inject a fault into guest 2.
 *
 * Returns: - > 0 if control has to be given to guest 2
 *            < 0 if an error occurred during injection.
 */
static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
			bool write_flag)
{
	struct kvm_s390_pgm_info pgm = {
		.code = code,
		.trans_exc_code =
			/* 0-51: virtual address */
			(vaddr & 0xfffffffffffff000UL) |
			/* 52-53: store / fetch */
			(((unsigned int) !write_flag) + 1) << 10,
			/* 62-63: asce id (always primary == 0) */
		.exc_access_id = 0, /* always primary */
		.op_access_id = 0, /* not MVPG */
	};
	int rc;

	if (code == PGM_PROTECTION)
		pgm.trans_exc_code |= 0x4UL;

	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
	return rc ? rc : 1;
}

/*
 * Handle a fault during vsie execution on a gmap shadow.
 *
 * Returns: - 0 if the fault was resolved
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	int rc;

	if (current->thread.gmap_int_code == PGM_PROTECTION)
		/* we can directly forward all protection exceptions */
		return inject_fault(vcpu, PGM_PROTECTION,
				    current->thread.gmap_addr, 1);

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				   current->thread.gmap_addr);
	if (rc > 0) {
		rc = inject_fault(vcpu, rc,
				  current->thread.gmap_addr,
				  current->thread.gmap_write_flag);
		if (rc >= 0)
			vsie_page->fault_addr = current->thread.gmap_addr;
	}
	return rc;
}

/*
 * Retry the previous fault that required guest 2 intervention. This avoids
 * one superfluous SIE re-entry and direct exit.
 *
 * Will ignore any errors. The next SIE fault will do proper fault handling.
 */
static void handle_last_fault(struct kvm_vcpu *vcpu,
			      struct vsie_page *vsie_page)
{
	if (vsie_page->fault_addr)
		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				      vsie_page->fault_addr);
	vsie_page->fault_addr = 0;
}

static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
{
	vsie_page->scb_s.icptcode = 0;
}

/* rewind the psw and clear the vsie icpt, so we can retry execution */
static void retry_vsie_icpt(struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int ilen = insn_length(scb_s->ipa >> 8);

	/* take care of EXECUTE instructions */
	if (scb_s->icptstatus & 1) {
		ilen = (scb_s->icptstatus >> 4) & 0x6;
		if (!ilen)
			ilen = 4;
	}
	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
	clear_vsie_icpt(vsie_page);
}

/*
 * Try to shadow + enable the guest 2 provided facility list.
 * Retry instruction execution if enabled for and provided by guest 2.
 *
 * Returns: - 0 if handled (retry or guest 2 icpt)
 *          - > 0 if control has to be given to guest 2
 */
static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	__u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;

	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
		retry_vsie_icpt(vsie_page);
		if (read_guest_real(vcpu, fac, &vsie_page->fac,
				    sizeof(vsie_page->fac)))
			return set_validity_icpt(scb_s, 0x1090U);
		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
	}
	return 0;
}

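/*
 * Note: scb_s->fac is only a 32-bit field, so the shadowed facility list
 * (and with it the whole vsie_page) must reside below 2 GB; this is why
 * get_vsie_page() allocates vsie pages with GFP_DMA.
 */
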
/*
 * Run the vsie on a shadow scb and a shadow gmap, without any further
 * sanity checks, handling SIE faults.
 *
 * Returns: - 0 everything went fine
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
	__releases(vcpu->kvm->srcu)
	__acquires(vcpu->kvm->srcu)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int guest_bp_isolation;
	int rc = 0;

	handle_last_fault(vcpu, vsie_page);

	if (need_resched())
		schedule();
	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	/* save current guest state of bp isolation override */
	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);

	/*
	 * The guest is running with BPBC, so we have to force it on for our
	 * nested guest. This is done by enabling BPBC globally, so the BPBC
	 * control in the SCB (which the nested guest can modify) is simply
	 * ignored.
	 */
	if (test_kvm_facility(vcpu->kvm, 82) &&
	    vcpu->arch.sie_block->fpf & FPF_BPBC)
		set_thread_flag(TIF_ISOLATE_BP_GUEST);

	local_irq_disable();
	guest_enter_irqoff();
	local_irq_enable();

	/*
	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
	 * and VCPU requests also hinder the vSIE from running and lead
	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
	 * also kick the vSIE.
	 */
	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
	barrier();
	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
	barrier();
	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;

	local_irq_disable();
	guest_exit_irqoff();
	local_irq_enable();

	/* restore guest state for bp isolation override */
	if (!guest_bp_isolation)
		clear_thread_flag(TIF_ISOLATE_BP_GUEST);

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	if (rc == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
		return 0;
	}

	if (rc > 0)
		rc = 0; /* we could still have an icpt */
	else if (rc == -EFAULT)
		return handle_fault(vcpu, vsie_page);

	switch (scb_s->icptcode) {
	case ICPT_INST:
		if (scb_s->ipa == 0xb2b0)
			rc = handle_stfle(vcpu, vsie_page);
		break;
	case ICPT_STOP:
		/* stop not requested by g2 - must have been a kick */
		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
			clear_vsie_icpt(vsie_page);
		break;
	case ICPT_VALIDITY:
		if ((scb_s->ipa & 0xf000) != 0xf000)
			scb_s->ipa += 0x1000;
		break;
	}
	return rc;
}

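/*
 * Note: a positive return value from sie64a() is normalized to 0 above
 * ("we could still have an icpt"); what matters afterwards is
 * scb_s->icptcode, which is post-processed for STFLE retries, STOP
 * intercepts caused by a kick, and validity intercepts.
 */
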
static void release_gmap_shadow(struct vsie_page *vsie_page)
{
	if (vsie_page->gmap)
		gmap_put(vsie_page->gmap);
	WRITE_ONCE(vsie_page->gmap, NULL);
	prefix_unmapped(vsie_page);
}

static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
			       struct vsie_page *vsie_page)
{
	unsigned long asce;
	union ctlreg0 cr0;
	struct gmap *gmap;
	int edat;

	asce = vcpu->arch.sie_block->gcr[1];
	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat += edat && test_kvm_facility(vcpu->kvm, 78);

	/*
	 * ASCE or EDAT could have changed since last icpt, or the gmap
	 * we're holding has been unshadowed. If the gmap is still valid,
	 * we can safely reuse it.
	 */
	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
		return 0;

	/* release the old shadow - if any, and mark the prefix as unmapped */
	release_gmap_shadow(vsie_page);
	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
	if (IS_ERR(gmap))
		return PTR_ERR(gmap);
	gmap->private = vcpu->kvm;
	WRITE_ONCE(vsie_page->gmap, gmap);
	return 0;
}

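/*
 * Note: "edat" is a level rather than a bool: 0 = no EDAT, 1 = EDAT-1
 * (facility 8), 2 = EDAT-2 (facilities 8 and 78). gmap_shadow_valid()
 * compares both the guest-2 ASCE and this level before reusing a shadow.
 */
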
/*
 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
 */
static void register_shadow_scb(struct kvm_vcpu *vcpu,
				struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;

	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
	/*
	 * External calls have to lead to a kick of the vcpu and
	 * therefore the vsie -> Simulate Wait state.
	 */
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	/*
	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
	 */
	preempt_disable();
	scb_s->epoch += vcpu->kvm->arch.epoch;

	if (scb_s->ecd & ECD_MEF) {
		scb_s->epdx += vcpu->kvm->arch.epdx;
		if (scb_s->epoch < vcpu->kvm->arch.epoch)
			scb_s->epdx += 1;
	}

	preempt_enable();
}

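/*
 * Note: the epoch of guest 3 is stored relative to guest 2; adding the g2
 * epoch makes it relative to the host TOD. With the multiple-epoch facility
 * (ECD_MEF), a carry out of the 64-bit epoch addition must be propagated
 * into the epoch index (epdx), which is what the "epoch < arch.epoch"
 * comparison detects.
 */
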
/*
 * Unregister a shadow scb from a VCPU.
 */
static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
}

/*
 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
 * prefix pages and faults.
 *
 * Returns: - 0 if no errors occurred
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int rc = 0;

	while (1) {
		rc = acquire_gmap_shadow(vcpu, vsie_page);
		if (!rc)
			rc = map_prefix(vcpu, vsie_page);
		if (!rc) {
			gmap_enable(vsie_page->gmap);
			update_intervention_requests(vsie_page);
			rc = do_vsie_run(vcpu, vsie_page);
			gmap_enable(vcpu->arch.gmap);
		}
		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);

		if (rc == -EAGAIN)
			rc = 0;
		if (rc || scb_s->icptcode || signal_pending(current) ||
		    kvm_s390_vcpu_has_irq(vcpu, 0) ||
		    kvm_s390_vcpu_sie_inhibited(vcpu))
			break;
		cond_resched();
	}

	if (rc == -EFAULT) {
		/*
		 * Addressing exceptions are always presented as intercepts.
		 * As addressing exceptions are suppressing and our guest 3 PSW
		 * points at the responsible instruction, we have to
		 * forward the PSW and set the ilc. If we can't read guest 3
		 * instruction, we can use an arbitrary ilc. Let's always use
		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
		 * memory. (we could also fake the shadow so the hardware
		 * handles it).
		 */
		scb_s->icptcode = ICPT_PROGI;
		scb_s->iprcc = PGM_ADDRESSING;
		scb_s->pgmilc = 4;
		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
		rc = 1;
	}
	return rc;
}

/*
 * Get or create a vsie page for a scb address.
 *
 * Returns: - address of a vsie page (cached or new one)
 *          - NULL if the same scb address is already used by another VCPU
 *          - ERR_PTR(-ENOMEM) if out of memory
 */
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int nr_vcpus;

	rcu_read_lock();
	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
	rcu_read_unlock();
	if (page) {
		if (page_ref_inc_return(page) == 2)
			return page_to_virt(page);
		page_ref_dec(page);
	}

	/*
	 * We want at least #online_vcpus shadows, so every VCPU can execute
	 * the VSIE in parallel.
	 */
	nr_vcpus = atomic_read(&kvm->online_vcpus);

	mutex_lock(&kvm->arch.vsie.mutex);
	if (kvm->arch.vsie.page_count < nr_vcpus) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
		if (!page) {
			mutex_unlock(&kvm->arch.vsie.mutex);
			return ERR_PTR(-ENOMEM);
		}
		page_ref_inc(page);
		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
		kvm->arch.vsie.page_count++;
	} else {
		/* reuse an existing entry that belongs to nobody */
		while (true) {
			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
			if (page_ref_inc_return(page) == 2)
				break;
			page_ref_dec(page);
			kvm->arch.vsie.next++;
			kvm->arch.vsie.next %= nr_vcpus;
		}
		radix_tree_delete(&kvm->arch.vsie.addr_to_page,
				  page->index >> 9);
	}
	page->index = addr;
	/* double use of the same address */
	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
		page_ref_dec(page);
		mutex_unlock(&kvm->arch.vsie.mutex);
		return NULL;
	}
	mutex_unlock(&kvm->arch.vsie.mutex);

	vsie_page = page_to_virt(page);
	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
	release_gmap_shadow(vsie_page);
	vsie_page->fault_addr = 0;
	vsie_page->scb_s.ihcpu = 0xffffU;
	return vsie_page;
}

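/*
 * Note: a page_ref count of 1 means "cached but unused", so
 * page_ref_inc_return() == 2 atomically claims a vsie page for the current
 * VCPU. The radix tree is keyed by the SCB origin >> 9, which is unique
 * because guest-2 SCBs are 512-byte aligned.
 */
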
/* put a vsie page acquired via get_vsie_page */
static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
{
	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);

	page_ref_dec(page);
}

int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
{
	struct vsie_page *vsie_page;
	unsigned long scb_addr;
	int rc;

	vcpu->stat.instruction_sie++;
	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);

	/* 512 byte alignment */
	if (unlikely(scb_addr & 0x1ffUL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
	    kvm_s390_vcpu_sie_inhibited(vcpu))
		return 0;

	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
	if (IS_ERR(vsie_page))
		return PTR_ERR(vsie_page);
	else if (!vsie_page)
		/* double use of sie control block - simply do nothing */
		return 0;

	rc = pin_scb(vcpu, vsie_page, scb_addr);
	if (rc)
		goto out_put;
	rc = shadow_scb(vcpu, vsie_page);
	if (rc)
		goto out_unpin_scb;
	rc = pin_blocks(vcpu, vsie_page);
	if (rc)
		goto out_unshadow;
	register_shadow_scb(vcpu, vsie_page);
	rc = vsie_run(vcpu, vsie_page);
	unregister_shadow_scb(vcpu);
	unpin_blocks(vcpu, vsie_page);
out_unshadow:
	unshadow_scb(vcpu, vsie_page);
out_unpin_scb:
	unpin_scb(vcpu, vsie_page, scb_addr);
out_put:
	put_vsie_page(vcpu->kvm, vsie_page);

	return rc < 0 ? rc : 0;
}

/* Init the vsie data structures. To be called when a vm is initialized. */
void kvm_s390_vsie_init(struct kvm *kvm)
{
	mutex_init(&kvm->arch.vsie.mutex);
	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
}

/* Destroy the vsie data structures. To be called when a vm is destroyed. */
void kvm_s390_vsie_destroy(struct kvm *kvm)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int i;

	mutex_lock(&kvm->arch.vsie.mutex);
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = kvm->arch.vsie.pages[i];
		kvm->arch.vsie.pages[i] = NULL;
		vsie_page = page_to_virt(page);
		release_gmap_shadow(vsie_page);
		/* free the radix tree entry */
		radix_tree_delete(&kvm->arch.vsie.addr_to_page,
				  page->index >> 9);
		__free_page(page);
	}
	kvm->arch.vsie.page_count = 0;
	mutex_unlock(&kvm->arch.vsie.mutex);
}

void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);

	/*
	 * Even if the VCPU lets go of the shadow sie block reference, it is
	 * still valid in the cache. So we can safely kick it.
	 */
	if (scb) {
		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
		if (scb->prog0c & PROG_IN_SIE)
			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
	}
}