/*
 * guest access functions
 *
 * Copyright IBM Corp. 2014
 */

#include <linux/vmalloc.h>
#include <linux/err.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include <asm/switch_to.h>
union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* Region- or Segment-Table Origin */
		unsigned long	 : 2;
		unsigned long g  : 1; /* Subspace Group Control */
		unsigned long p  : 1; /* Private Space Control */
		unsigned long s  : 1; /* Storage-Alteration-Event Control */
		unsigned long x  : 1; /* Space-Switch-Event Control */
		unsigned long r  : 1; /* Real-Space Control */
		unsigned long	 : 1;
		unsigned long dt : 2; /* Designation-Type Control */
		unsigned long tl : 2; /* Region- or Segment-Table Length */
	};
};

enum {
	ASCE_TYPE_SEGMENT = 0,
	ASCE_TYPE_REGION3 = 1,
	ASCE_TYPE_REGION2 = 2,
	ASCE_TYPE_REGION1 = 3
};
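/*
 * Illustrative sketch, not part of the original file: the origin field of
 * the asce union is a 4K-frame number, so the top-level translation table
 * starts at origin * 4096; dt selects the level at which the DAT walk
 * starts and tl designates the table length in units of 4K blocks of
 * 512 entries each, exactly as guest_translate() evaluates below.
 */
static inline unsigned long asce_table_start_example(unsigned long asce_val)
{
	union asce asce = {.val = asce_val};

	/* e.g. dt == ASCE_TYPE_REGION3: the walk starts at a region-third
	 * table of (asce.tl + 1) * 512 entries */
	return asce.origin * 4096;
}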
union region1_table_entry {
	unsigned long val;
	struct {
		unsigned long rto : 52; /* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Second-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Second-Table Length */
	};
};

union region2_table_entry {
	unsigned long val;
	struct {
		unsigned long rto : 52; /* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Third-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Third-Table Length */
	};
};
struct region3_table_entry_fc0 {
	unsigned long sto : 52; /* Segment-Table Origin */
	unsigned long	 : 1;
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 1;
	unsigned long tf : 2; /* Segment-Table Offset */
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long tl : 2; /* Segment-Table Length */
};

struct region3_table_entry_fc1 {
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long	 : 14;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc : 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long	 : 2;
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

union region3_table_entry {
	unsigned long val;
	struct region3_table_entry_fc0 fc0;
	struct region3_table_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long cr : 1; /* Common-Region Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};
struct segment_entry_fc0 {
	unsigned long pto : 53; /* Page-Table Origin */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 3;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

struct segment_entry_fc1 {
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long	 : 3;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc : 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long	 : 2;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

union segment_table_entry {
	unsigned long val;
	struct segment_entry_fc0 fc0;
	struct segment_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Segment-Invalid Bit */
		unsigned long cs : 1; /* Common-Segment Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};
enum {
	TABLE_TYPE_SEGMENT = 0,
	TABLE_TYPE_REGION3 = 1,
	TABLE_TYPE_REGION2 = 2,
	TABLE_TYPE_REGION1 = 3
};

union page_table_entry {
	unsigned long val;
	struct {
		unsigned long pfra : 52; /* Page-Frame Real Address */
		unsigned long z  : 1; /* Zero Bit */
		unsigned long i  : 1; /* Page-Invalid Bit */
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long co : 1; /* Change-Recording Override */
		unsigned long	 : 8;
	};
};
/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx  : 11;
		unsigned long px  : 8;
		unsigned long bx  : 12;
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long	    : 9;
		unsigned long rsx01 : 2;
		unsigned long	    : 9;
		unsigned long rtx01 : 2;
		unsigned long	    : 9;
		unsigned long sx01  : 2;
		unsigned long	    : 29;
	};
};
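/*
 * Illustrative self-check, not part of the original file: s390 is
 * big-endian, so bit-fields are allocated from the most significant bit
 * down, and the union above splits an address as rfx = addr >> 53,
 * rsx = (addr >> 42) & 0x7ff, ..., bx = addr & 0xfff.
 */
static inline void vaddress_decode_example(unsigned long gva)
{
	union vaddress vaddr = {.addr = gva};

	WARN_ON(vaddr.rfx != (gva >> 53));		/* region-first index */
	WARN_ON(vaddr.sx != ((gva >> 20) & 0x7ff));	/* segment index */
	WARN_ON(vaddr.bx != (gva & 0xfff));		/* byte offset */
}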
/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members are used to
 * simply assign them the value of a region, segment or page table entry.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};
union alet {
	u32 val;
	struct {
		u32 reserved : 7;
		u32 p        : 1;
		u32 alesn    : 8;
		u32 alen     : 16;
	};
};

union ald {
	u32 val;
	struct {
		u32     : 1;
		u32 alo : 24;
		u32 all : 7;
	};
};

struct ale {
	unsigned long i      : 1;  /* ALEN-Invalid Bit */
	unsigned long        : 5;
	unsigned long fo     : 1;  /* Fetch-Only Bit */
	unsigned long p      : 1;  /* Private Bit */
	unsigned long alesn  : 8;  /* Access-List-Entry Sequence Number */
	unsigned long aleax  : 16; /* Access-List-Entry Authorization Index */
	unsigned long        : 32;
	unsigned long        : 1;
	unsigned long asteo  : 25; /* ASN-Second-Table-Entry Origin */
	unsigned long        : 6;
	unsigned long astesn : 32; /* ASTE Sequence Number */
};

struct aste {
	unsigned long i      : 1;  /* ASX-Invalid Bit */
	unsigned long ato    : 29; /* Authority-Table Origin */
	unsigned long        : 1;
	unsigned long b      : 1;  /* Base-Space Bit */
	unsigned long ax     : 16; /* Authorization Index */
	unsigned long atl    : 12; /* Authority-Table Length */
	unsigned long        : 2;
	unsigned long ca     : 1;  /* Controlled-ASN Bit */
	unsigned long ra     : 1;  /* Reusable-ASN Bit */
	unsigned long asce   : 64; /* Address-Space-Control Element */
	unsigned long ald    : 32;
	unsigned long astesn : 32;
	/* .. more fields there */
};
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & 1) {
		int rc;

		read_lock(&vcpu->kvm->arch.sca_lock);
		rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
		read_unlock(&vcpu->kvm->arch.sca_lock);
		return rc;
	}
	return vcpu->kvm->arch.ipte_lock_count != 0;
}
static void ipte_lock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count++;
	if (vcpu->kvm->arch.ipte_lock_count > 1)
		goto out;
retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.k) {
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count--;
	if (vcpu->kvm->arch.ipte_lock_count)
		goto out;
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	wake_up(&vcpu->kvm->arch.ipte_wq);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
static void ipte_lock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.kg) {
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	if (!new.kh)
		wake_up(&vcpu->kvm->arch.ipte_wq);
}
void ipte_lock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & 1)
		ipte_lock_siif(vcpu);
	else
		ipte_lock_simple(vcpu);
}

void ipte_unlock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & 1)
		ipte_unlock_siif(vcpu);
	else
		ipte_unlock_simple(vcpu);
}
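/*
 * Illustrative usage sketch, not part of the original file: translation
 * helpers such as guest_translate_address() below do not take the IPTE
 * lock themselves, so a caller that needs the guest page tables to stay
 * stable brackets the translation:
 *
 *	ipte_lock(vcpu);
 *	rc = guest_translate_address(vcpu, gva, ar, &gpa, GACC_FETCH);
 *	ipte_unlock(vcpu);
 */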
static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
			  enum gacc_mode mode)
{
	union alet alet;
	struct ale ale;
	struct aste aste;
	unsigned long ald_addr, authority_table_addr;
	union ald ald;
	int eax, rc;
	u8 authority_table;

	if (ar >= NUM_ACRS)
		return -EINVAL;

	save_access_regs(vcpu->run->s.regs.acrs);
	alet.val = vcpu->run->s.regs.acrs[ar];

	if (ar == 0 || alet.val == 0) {
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	} else if (alet.val == 1) {
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	}

	if (alet.reserved)
		return PGM_ALET_SPECIFICATION;

	if (alet.p)
		ald_addr = vcpu->arch.sie_block->gcr[5];
	else
		ald_addr = vcpu->arch.sie_block->gcr[2];
	ald_addr &= 0x7fffffc0;

	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
	if (rc)
		return rc;

	if (alet.alen / 8 > ald.all)
		return PGM_ALEN_TRANSLATION;

	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
		return PGM_ADDRESSING;

	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
			     sizeof(struct ale));
	if (rc)
		return rc;

	if (ale.i == 1)
		return PGM_ALEN_TRANSLATION;
	if (ale.alesn != alet.alesn)
		return PGM_ALE_SEQUENCE;

	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
	if (rc)
		return rc;

	if (aste.i)
		return PGM_ASTE_VALIDITY;
	if (aste.astesn != ale.astesn)
		return PGM_ASTE_SEQUENCE;

	if (ale.p == 1) {
		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
		if (ale.aleax != eax) {
			if (eax / 16 > aste.atl)
				return PGM_EXTENDED_AUTHORITY;

			authority_table_addr = aste.ato * 4 + eax / 4;

			rc = read_guest_real(vcpu, authority_table_addr,
					     &authority_table,
					     sizeof(u8));
			if (rc)
				return rc;

			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
				return PGM_EXTENDED_AUTHORITY;
		}
	}

	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
	return 0;
}
struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi  : 2;  /* Access Exception Fetch/Store Indication */
	unsigned long	   : 6;
	unsigned long b60  : 1;
	unsigned long b61  : 1;
	unsigned long as   : 2;  /* ASCE Identifier */
};

enum {
	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
	FSI_STORE   = 1, /* Exception was due to store operation */
	FSI_FETCH   = 2  /* Exception was due to fetch operation */
};

enum prot_type {
	PROT_TYPE_LA   = 0,
	PROT_TYPE_KEYC = 1,
	PROT_TYPE_ALC  = 2,
	PROT_TYPE_DAT  = 3,
};
static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
		     ar_t ar, enum gacc_mode mode, enum prot_type prot)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	struct trans_exc_code_bits *tec;

	memset(pgm, 0, sizeof(*pgm));
	pgm->code = code;
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;

	switch (code) {
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		/*
		 * op_access_id only applies to MOVE_PAGE -> set bit 61
		 * exc_access_id has to be set to 0 for some instructions. Both
		 * cases have to be handled by the caller. We can always store
		 * exc_access_id, as it is undefined for non-ar cases.
		 */
		tec->addr = gva >> PAGE_SHIFT;
		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
		tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
		/* FALL THROUGH */
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_ASTE_SEQUENCE:
	case PGM_EXTENDED_AUTHORITY:
		pgm->exc_access_id = ar;
		break;
	case PGM_PROTECTION:
		switch (prot) {
		case PROT_TYPE_ALC:
			tec->b60 = 1;
			/* FALL THROUGH */
		case PROT_TYPE_DAT:
			tec->b61 = 1;
			tec->addr = gva >> PAGE_SHIFT;
			tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
			tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
			/* exc_access_id is undefined for most cases */
			pgm->exc_access_id = ar;
			break;
		default: /* LA and KEYC set b61 to 0, other params undefined */
			break;
		}
		break;
	}
	return code;
}
static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
			 unsigned long ga, ar_t ar, enum gacc_mode mode)
{
	int rc;
	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);

	if (!psw.t) {
		asce->val = 0;
		asce->r = 1;
		return 0;
	}

	if (mode == GACC_IFETCH)
		psw.as = psw.as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY;

	switch (psw.as) {
	case PSW_AS_PRIMARY:
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	case PSW_AS_SECONDARY:
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	case PSW_AS_HOME:
		asce->val = vcpu->arch.sie_block->gcr[13];
		return 0;
	case PSW_AS_ACCREG:
		rc = ar_translation(vcpu, asce, ar, mode);
		if (rc > 0)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
		return rc;
	}
	return 0;
}
static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}
/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @mode: indicates the access mode to be used
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	      memory mapping
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
				     enum gacc_mode mode)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
	if (asce.r)
		goto real_address;
	ptr = asce.origin * 4096;
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (edat1)
			dat_protection |= rfte.p;
		ptr = rfte.rto * 4096 + vaddr.rsx * 8;
	}
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (edat1)
			dat_protection |= rste.p;
		ptr = rste.rto * 4096 + vaddr.rtx * 8;
	}
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (edat1)
			dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8;
	}
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * 2048 + vaddr.px * 8;
	}
	}
	if (kvm_is_error_gpa(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	if (pte.co && !edat1)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (mode == GACC_STORE && dat_protection)
		return PGM_PROTECTION;
	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}
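/*
 * Worked example, not part of the original file: with a segment-table
 * ASCE (asce.dt == ASCE_TYPE_SEGMENT) the walk above touches two guest
 * tables. For gva = 0x12345678 the vaddress union yields sx = 0x123,
 * px = 0x45 and bx = 0x678, so the segment-table entry is fetched from
 * asce.origin * 4096 + 0x123 * 8, the page-table entry from
 * ste.fc0.pto * 2048 + 0x45 * 8, and the resulting absolute address is
 * pte.pfra << 12 | 0x678.
 */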
static inline int is_low_address(unsigned long ga)
{
	/* Check for address ranges 0..511 and 4096..4607 */
	return (ga & ~0x11fful) == 0;
}
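/*
 * Worked example, not part of the original file: ~0x11fful masks off
 * bits 0-8 and bit 12, so the result is zero exactly when every other
 * bit of ga is zero:
 *	0x01ff & ~0x11ff == 0	-> low address (511)
 *	0x1000 & ~0x11ff == 0	-> low address (4096)
 *	0x0800 & ~0x11ff != 0	-> not low (2048)
 */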
static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
					  const union asce asce)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (!ctlreg0.lap)
		return 0;
	if (psw_bits(*psw).t && asce.p)
		return 0;
	return 1;
}
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
			    unsigned long *pages, unsigned long nr_pages,
			    const union asce asce, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	int lap_enabled, rc = 0;

	lap_enabled = low_address_protection_enabled(vcpu, asce);
	while (nr_pages) {
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
					 PROT_TYPE_LA);
		ga &= PAGE_MASK;
		if (psw_bits(*psw).t) {
			rc = guest_translate(vcpu, ga, pages, asce, mode);
			if (rc < 0)
				return rc;
		} else {
			*pages = kvm_s390_real_to_abs(vcpu, ga);
			if (kvm_is_error_gpa(vcpu->kvm, *pages))
				rc = PGM_ADDRESSING;
		}
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_DAT);
		ga += PAGE_SIZE;
		pages++;
		nr_pages--;
	}
	return 0;
}
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
		 unsigned long len, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long _len, nr_pages, gpa, idx;
	unsigned long pages_array[2];
	unsigned long *pages;
	int need_ipte_lock;
	union asce asce;
	int rc;

	if (!len)
		return 0;
	ga = kvm_s390_logical_to_effective(vcpu, ga);
	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
	if (rc)
		return rc;
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	pages = pages_array;
	if (nr_pages > ARRAY_SIZE(pages_array))
		pages = vmalloc(nr_pages * sizeof(unsigned long));
	if (!pages)
		return -ENOMEM;
	need_ipte_lock = psw_bits(*psw).t && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu);
	rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
	for (idx = 0; idx < nr_pages && !rc; idx++) {
		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (mode == GACC_STORE)
			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
		else
			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
		len -= _len;
		ga += _len;
		data += _len;
	}
	if (need_ipte_lock)
		ipte_unlock(vcpu);
	if (nr_pages > ARRAY_SIZE(pages_array))
		vfree(pages);
	return rc;
}
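/*
 * Worked example, not part of the original file: a 10-byte access at
 * ga = 0xffb crosses one page boundary. access_guest() computes
 * nr_pages = (((0xffb & ~PAGE_MASK) + 10 - 1) >> PAGE_SHIFT) + 1 = 2,
 * translates both pages up front, then copies 5 bytes from the end of
 * the first page and 5 bytes from the start of the second.
 */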
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode)
{
	unsigned long _len, gpa;
	int rc = 0;

	while (len && !rc) {
		gpa = kvm_s390_real_to_abs(vcpu, gra);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (mode)
			rc = write_guest_abs(vcpu, gpa, data, _len);
		else
			rc = read_guest_abs(vcpu, gpa, data, _len);
		len -= _len;
		gra += _len;
		data += _len;
	}
	return rc;
}
/**
 * guest_translate_address - translate guest logical into guest absolute address
 *
 * Parameter semantics are the same as the ones from guest_translate.
 * The memory contents at the guest address are not changed.
 *
 * Note: The IPTE lock is not taken during this function, so the caller
 * has to take care of this.
 */
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
			    unsigned long *gpa, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	union asce asce;
	int rc;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
		if (mode == GACC_STORE)
			return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
					 mode, PROT_TYPE_LA);
	}

	if (psw_bits(*psw).t && !asce.r) {	/* Use DAT? */
		rc = guest_translate(vcpu, gva, gpa, asce, mode);
		if (rc > 0)
			return trans_exc(vcpu, rc, gva, 0, mode, PROT_TYPE_DAT);
	} else {
		*gpa = kvm_s390_real_to_abs(vcpu, gva);
		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
			rc = PGM_ADDRESSING;
	}

	return rc;
}
/**
 * check_gva_range - test a range of guest virtual addresses for accessibility
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
		    unsigned long length, enum gacc_mode mode)
{
	unsigned long gpa;
	unsigned long currlen;
	int rc = 0;

	ipte_lock(vcpu);
	while (length > 0 && !rc) {
		currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
		rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
		gva += currlen;
		length -= currlen;
	}
	ipte_unlock(vcpu);

	return rc;
}
/**
 * kvm_s390_check_low_addr_prot_real - check for low-address protection
 * @gra: Guest real address
 *
 * Checks whether an address is subject to low-address protection and sets
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};

	if (!ctlreg0.lap || !is_low_address(gra))
		return 0;
	return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
}
/**
 * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: pointer to the page table address result
 * @fake: pgt references contiguous guest memory block, not a pgtable
 */
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
				  unsigned long *pgt, int *dat_protection,
				  int *fake)
{
	struct gmap *parent;
	union asce asce;
	union vaddress vaddr;
	unsigned long ptr;
	int rc;

	*fake = 0;
	*dat_protection = 0;
	parent = sg->parent;
	vaddr.addr = saddr;
	asce.val = sg->orig_asce;
	ptr = asce.origin * 4096;
	if (asce.r) {
		*fake = 1;
		asce.dt = ASCE_TYPE_REGION1;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl && !asce.r)
			return PGM_REGION_FIRST_TRANS;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		break;
	}

	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (*fake) {
			/* offset in 16EB guest memory block */
			ptr = ptr + ((unsigned long) vaddr.rfx << 53UL);
			rfte.val = ptr;
			goto shadow_r2t;
		}
		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
		if (rc)
			return rc;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rfte.p;
		ptr = rfte.rto << 12UL;
shadow_r2t:
		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
		if (rc)
			return rc;
	}
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (*fake) {
			/* offset in 8PB guest memory block */
			ptr = ptr + ((unsigned long) vaddr.rsx << 42UL);
			rste.val = ptr;
			goto shadow_r3t;
		}
		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
		if (rc)
			return rc;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rste.p;
		ptr = rste.rto << 12UL;
shadow_r3t:
		rste.p |= *dat_protection;
		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
		if (rc)
			return rc;
	}
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (*fake) {
			/* offset in 4TB guest memory block */
			ptr = ptr + ((unsigned long) vaddr.rtx << 31UL);
			rtte.val = ptr;
			goto shadow_sgt;
		}
		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
		if (rc)
			return rc;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && sg->edat_level >= 2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && sg->edat_level >= 2) {
			*dat_protection |= rtte.fc0.p;
			*fake = 1;
			ptr = rtte.fc1.rfaa << 31UL;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (sg->edat_level >= 1)
			*dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto << 12UL;
shadow_sgt:
		rtte.fc0.p |= *dat_protection;
		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
		if (rc)
			return rc;
	}
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (*fake) {
			/* offset in 2G guest memory block */
			ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
			ste.val = ptr;
			goto shadow_pgt;
		}
		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
		if (rc)
			return rc;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		*dat_protection |= ste.fc0.p;
		if (ste.fc && sg->edat_level >= 1) {
			*fake = 1;
			ptr = ste.fc1.sfaa << 20UL;
			ste.val = ptr;
			goto shadow_pgt;
		}
		ptr = ste.fc0.pto << 11UL;
shadow_pgt:
		ste.fc0.p |= *dat_protection;
		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
		if (rc)
			return rc;
	}
	}
	/* Return the parent address of the page table */
	*pgt = ptr;
	return 0;
}
/**
 * kvm_s390_shadow_fault - handle fault on a shadow page table
 * @vcpu: virtual cpu
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 *
 * Returns: - 0 if the shadow fault was successfully resolved
 *	    - > 0 (pgm exception code) on exceptions while faulting
 *	    - -EAGAIN if the caller can retry immediately
 *	    - -EFAULT when accessing invalid guest addresses
 *	    - -ENOMEM if out of memory
 */
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
			  unsigned long saddr)
{
	union vaddress vaddr;
	union page_table_entry pte;
	unsigned long pgt;
	int dat_protection, fake;
	int rc;

	down_read(&sg->mm->mmap_sem);
	/*
	 * We don't want any guest-2 tables to change - so the parent
	 * tables/pointers we read stay valid - unshadowing is however
	 * always possible - only guest_table_lock protects us.
	 */
	ipte_lock(vcpu);

	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
					    &fake);

	vaddr.addr = saddr;
	if (fake) {
		/* offset in 1MB guest memory block */
		pte.val = pgt + ((unsigned long) vaddr.px << 12UL);
		goto shadow_page;
	}
	if (!rc)
		rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
	if (!rc && pte.i)
		rc = PGM_PAGE_TRANSLATION;
	if (!rc && (pte.z || (pte.co && sg->edat_level < 1)))
		rc = PGM_TRANSLATION_SPEC;
shadow_page:
	pte.p |= dat_protection;
	if (!rc)
		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
	ipte_unlock(vcpu);
	up_read(&sg->mm->mmap_sem);
	return rc;
}