/*
 * guest access functions
 *
 * Copyright IBM Corp. 2014
 */

#include <linux/vmalloc.h>
#include <linux/err.h>
#include <asm/pgtable.h>
#include "kvm-s390.h"
#include "gaccess.h"
/* Address-Space-Control Element */
union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* Region- or Segment-Table Origin */
		unsigned long	 : 2;
		unsigned long g  : 1; /* Subspace Group Control */
		unsigned long p  : 1; /* Private Space Control */
		unsigned long s  : 1; /* Storage-Alteration-Event Control */
		unsigned long x  : 1; /* Space-Switch-Event Control */
		unsigned long r  : 1; /* Real-Space Control */
		unsigned long	 : 1;
		unsigned long dt : 2; /* Designation-Type Control */
		unsigned long tl : 2; /* Region- or Segment-Table Length */
	};
};
enum {
	ASCE_TYPE_SEGMENT = 0,
	ASCE_TYPE_REGION3 = 1,
	ASCE_TYPE_REGION2 = 2,
	ASCE_TYPE_REGION1 = 3
};
union region1_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Second-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Second-Table Length */
	};
};
union region2_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Third-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Third-Table Length */
	};
};
struct region3_table_entry_fc0 {
	unsigned long sto: 52;/* Segment-Table Origin */
	unsigned long	 : 1;
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 1;
	unsigned long tf : 2; /* Segment-Table Offset */
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long tl : 2; /* Segment-Table Length */
};
struct region3_table_entry_fc1 {
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long	 : 14;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc: 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long	 : 2;
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};
union region3_table_entry {
	unsigned long val;
	struct region3_table_entry_fc0 fc0;
	struct region3_table_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long cr : 1; /* Common-Region Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};
struct segment_entry_fc0 {
	unsigned long pto: 53;/* Page-Table Origin */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 1;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 4;
};
struct segment_entry_fc1 {
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long	 : 3;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc: 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long	 : 2;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};
union segment_table_entry {
	unsigned long val;
	struct segment_entry_fc0 fc0;
	struct segment_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Segment-Invalid Bit */
		unsigned long cs : 1; /* Common-Segment Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};
enum {
	TABLE_TYPE_SEGMENT = 0,
	TABLE_TYPE_REGION3 = 1,
	TABLE_TYPE_REGION2 = 2,
	TABLE_TYPE_REGION1 = 3
};
union page_table_entry {
	unsigned long val;
	struct {
		unsigned long pfra : 52; /* Page-Frame Real Address */
		unsigned long z  : 1; /* Zero Bit */
		unsigned long i  : 1; /* Page-Invalid Bit */
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long co : 1; /* Change-Recording Override */
		unsigned long	 : 8;
	};
};
/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx  : 11;
		unsigned long px  : 8;
		unsigned long bx  : 12;
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long	    : 9;
		unsigned long rsx01 : 2;
		unsigned long	    : 9;
		unsigned long rtx01 : 2;
		unsigned long	    : 9;
		unsigned long sx01  : 2;
		unsigned long	    : 29;
	};
};
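/*
 * Example (added): with big-endian bitfield allocation the fields above
 * map to bits 0-10 (rfx), 11-21 (rsx), 22-32 (rtx), 33-43 (sx), 44-51 (px)
 * and 52-63 (bx) of the 64-bit address, so e.g. vaddr.sx is the segment
 * index and vaddr.bx the byte offset within the 4K page. The rfx01 etc.
 * fields of the second struct alias the two most significant bits of each
 * index for the table-length checks in guest_translate().
 */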
/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members are used to
 * simply assign them the value of a region, segment or page table entry.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};
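/*
 * Note (added explanation): the overlay works because bitfields are
 * allocated from the most significant bit on s390, so assigning e.g.
 * raddr.rfaa only replaces the high 33 bits while the low-order offset
 * bits of the originally assigned virtual address remain intact.
 */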
static int ipte_lock_count;
static DEFINE_MUTEX(ipte_mutex);
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
	union ipte_control *ic = &vcpu->kvm->arch.sca->ipte_control;

	if (vcpu->arch.sie_block->eca & 1)
		return ic->kh != 0;
	return ipte_lock_count != 0;
}
static void ipte_lock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&ipte_mutex);
	ipte_lock_count++;
	if (ipte_lock_count > 1)
		goto out;
	ic = &vcpu->kvm->arch.sca->ipte_control;
	do {
		old = ACCESS_ONCE(*ic);
		while (old.k) {
			cond_resched();
			old = ACCESS_ONCE(*ic);
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
out:
	mutex_unlock(&ipte_mutex);
}
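/*
 * Note (added explanation): the do/while above is the usual lock-free
 * update pattern: snapshot the IPTE control word, wait (with cond_resched())
 * while the lock bit is set, then publish the modified copy via cmpxchg()
 * and retry from a fresh snapshot if the word changed concurrently.
 */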
static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&ipte_mutex);
	ipte_lock_count--;
	if (ipte_lock_count)
		goto out;
	ic = &vcpu->kvm->arch.sca->ipte_control;
	do {
		new = old = ACCESS_ONCE(*ic);
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	if (!ipte_lock_count)
		wake_up(&vcpu->kvm->arch.ipte_wq);
out:
	mutex_unlock(&ipte_mutex);
}
static void ipte_lock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	ic = &vcpu->kvm->arch.sca->ipte_control;
	do {
		old = ACCESS_ONCE(*ic);
		while (old.kg) {
			cond_resched();
			old = ACCESS_ONCE(*ic);
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
}
static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	ic = &vcpu->kvm->arch.sca->ipte_control;
	do {
		new = old = ACCESS_ONCE(*ic);
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	if (!new.kh)
		wake_up(&vcpu->kvm->arch.ipte_wq);
}
void ipte_lock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & 1)
		ipte_lock_siif(vcpu);
	else
		ipte_lock_simple(vcpu);
}
void ipte_unlock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & 1)
		ipte_unlock_siif(vcpu);
	else
		ipte_unlock_simple(vcpu);
}
static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu)
{
	switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
	case PSW_AS_PRIMARY:
		return vcpu->arch.sie_block->gcr[1];
	case PSW_AS_SECONDARY:
		return vcpu->arch.sie_block->gcr[7];
	case PSW_AS_HOME:
		return vcpu->arch.sie_block->gcr[13];
	}
	return 0;
}
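/*
 * Note (added explanation): the PSW address-space control selects which
 * control register supplies the ASCE: primary space uses CR1, secondary
 * space CR7 and home space CR13. Access-register mode would require an
 * access-register translation and is rejected by the callers for now.
 */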
static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}
/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @write: indicates if access is a write access
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	      prefix mapping
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, int write)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2;
	union asce asce;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	edat1 = ctlreg0.edat && test_vfacility(8);
	edat2 = edat1 && test_vfacility(78);
	asce.val = get_vcpu_asce(vcpu);
	if (asce.r)
		goto real_address;
	ptr = asce.origin * 4096;
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (edat1)
			dat_protection |= rfte.p;
		ptr = rfte.rto * 4096 + vaddr.rsx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (edat1)
			dat_protection |= rste.p;
		ptr = rste.rto * 4096 + vaddr.rtx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (edat1)
			dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * 2048 + vaddr.px * 8;
	}
	}
	if (kvm_is_error_gpa(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	if (pte.co && !edat1)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (write && dat_protection)
		return PGM_PROTECTION;
	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}
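/*
 * Usage sketch (added, not part of the original source), mirroring how
 * guest_page_range() below consumes the three return classes:
 *
 *	rc = guest_translate(vcpu, gva, &gpa, write);
 *	if (rc < 0)
 *		return rc;	(host failure, e.g. -EFAULT)
 *	else if (rc)
 *		pgm->code = rc;	(program interruption code for the guest)
 *	else
 *		use gpa
 */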
static inline int is_low_address(unsigned long ga)
{
	/* Check for address ranges 0..511 and 4096..4607 */
	return (ga & ~0x11fful) == 0;
}
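/*
 * Note (added explanation): a single mask test suffices because the two
 * ranges differ only in bit 0x1000; clearing all bits within 0x11ff must
 * give zero, i.e. ga lies in [0, 0x1ff] or [0x1000, 0x11ff].
 */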
static int low_address_protection_enabled(struct kvm_vcpu *vcpu)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	union asce asce;

	if (!ctlreg0.lap)
		return 0;
	asce.val = get_vcpu_asce(vcpu);
	if (psw_bits(*psw).t && asce.p)
		return 0;
	return 1;
}
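/*
 * Note (added explanation): low-address protection is enabled by the LAP
 * bit in CR0 and, per the architecture, is not applied when DAT is active
 * and the private-space control is set in the ASCE, which is exactly what
 * the two checks above implement.
 */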
struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi  : 2;	 /* Access Exception Fetch/Store Indication */
	unsigned long	   : 7;
	unsigned long b61  : 1;
	unsigned long as   : 2;	 /* ASCE Identifier */
};
enum {
	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
	FSI_STORE   = 1, /* Exception was due to store operation */
	FSI_FETCH   = 2	 /* Exception was due to fetch operation */
};
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
			    unsigned long *pages, unsigned long nr_pages,
			    int write)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct trans_exc_code_bits *tec_bits;
	int lap_enabled, rc;

	memset(pgm, 0, sizeof(*pgm));
	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
	tec_bits->as = psw_bits(*psw).as;
	lap_enabled = low_address_protection_enabled(vcpu);
	while (nr_pages) {
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		tec_bits->addr = ga >> PAGE_SHIFT;
		if (write && lap_enabled && is_low_address(ga)) {
			pgm->code = PGM_PROTECTION;
			return pgm->code;
		}
		ga &= PAGE_MASK;
		if (psw_bits(*psw).t) {
			rc = guest_translate(vcpu, ga, pages, write);
			if (rc < 0)
				return rc;
			if (rc == PGM_PROTECTION)
				tec_bits->b61 = 1;
			if (rc)
				pgm->code = rc;
		} else {
			*pages = kvm_s390_real_to_abs(vcpu, ga);
			if (kvm_is_error_gpa(vcpu->kvm, *pages))
				pgm->code = PGM_ADDRESSING;
		}
		if (pgm->code)
			return pgm->code;
		ga += PAGE_SIZE;
		pages++;
		nr_pages--;
	}
	return 0;
}
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
		 unsigned long len, int write)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long _len, nr_pages, gpa, idx;
	unsigned long pages_array[2];
	unsigned long *pages;
	int need_ipte_lock;
	union asce asce;
	int rc;

	if (!len)
		return 0;
	/* Access register mode is not supported yet. */
	if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
		return -EOPNOTSUPP;
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
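	/*
	 * Worked example (added): ga = 0x1ffc, len = 8: the offset into the
	 * first page is 0xffc, 0xffc + 7 = 0x1003, shifted right by
	 * PAGE_SHIFT gives 1, plus 1 = 2 pages spanned by the access.
	 */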
	pages = pages_array;
	if (nr_pages > ARRAY_SIZE(pages_array))
		pages = vmalloc(nr_pages * sizeof(unsigned long));
	if (!pages)
		return -ENOMEM;
	asce.val = get_vcpu_asce(vcpu);
	need_ipte_lock = psw_bits(*psw).t && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu);
	rc = guest_page_range(vcpu, ga, pages, nr_pages, write);
	for (idx = 0; idx < nr_pages && !rc; idx++) {
		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (write)
			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
		else
			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
		len -= _len;
		ga += _len;
		data += _len;
	}
	if (need_ipte_lock)
		ipte_unlock(vcpu);
	if (nr_pages > ARRAY_SIZE(pages_array))
		vfree(pages);
	return rc;
}
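/*
 * Usage sketch (added): callers normally go through the read_guest() /
 * write_guest() wrappers from gaccess.h instead of calling access_guest()
 * directly, e.g.
 *
 *	rc = write_guest(vcpu, ga, &val, sizeof(val));
 */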
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, int write)
{
	unsigned long _len, gpa;
	int rc = 0;

	while (len && !rc) {
		gpa = kvm_s390_real_to_abs(vcpu, gra);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (write)
			rc = write_guest_abs(vcpu, gpa, data, _len);
		else
			rc = read_guest_abs(vcpu, gpa, data, _len);
		len -= _len;
		gra += _len;
		data += _len;
	}
	return rc;
}
/**
 * guest_translate_address - translate guest logical into guest absolute address
 *
 * Parameter semantics are the same as the ones from guest_translate.
 * The memory contents at the guest address are not changed.
 *
 * Note: The IPTE lock is not taken during this function, so the caller
 * has to take care of this.
 */
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa, int write)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct trans_exc_code_bits *tec;
	union asce asce;
	int rc;

	/* Access register mode is not supported yet. */
	if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
		return -EOPNOTSUPP;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	memset(pgm, 0, sizeof(*pgm));
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	tec->as = psw_bits(*psw).as;
	tec->fsi = write ? FSI_STORE : FSI_FETCH;
	tec->addr = gva >> PAGE_SHIFT;
	if (is_low_address(gva) && low_address_protection_enabled(vcpu)) {
		if (write) {
			rc = pgm->code = PGM_PROTECTION;
			return rc;
		}
	}

	asce.val = get_vcpu_asce(vcpu);
	if (psw_bits(*psw).t && !asce.r) {	/* Use DAT? */
		rc = guest_translate(vcpu, gva, gpa, write);
		if (rc > 0) {
			if (rc == PGM_PROTECTION)
				tec->b61 = 1;
			pgm->code = rc;
		}
	} else {
		rc = 0;
		*gpa = kvm_s390_real_to_abs(vcpu, gva);
		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
			rc = pgm->code = PGM_ADDRESSING;
	}

	return rc;
}
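/*
 * Usage sketch (added): per the note above, the caller brackets the
 * translation with the IPTE lock:
 *
 *	ipte_lock(vcpu);
 *	rc = guest_translate_address(vcpu, gva, &gpa, 1);
 *	ipte_unlock(vcpu);
 */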
/**
 * kvm_s390_check_low_addr_protection - check for low-address protection
 * @ga: Guest address
 *
 * Checks whether an address is subject to low-address protection and sets
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct trans_exc_code_bits *tec_bits;

	if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
		return 0;

	memset(pgm, 0, sizeof(*pgm));
	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	tec_bits->fsi = FSI_STORE;
	tec_bits->as = psw_bits(*psw).as;
	tec_bits->addr = ga >> PAGE_SHIFT;
	pgm->code = PGM_PROTECTION;

	return pgm->code;
}
;