// SPDX-License-Identifier: GPL-2.0
/*
 * guest access functions
 *
 * Copyright IBM Corp. 2014
 */

#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/err.h>

#include <asm/pgtable.h>
#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include <asm/switch_to.h>
union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* Region- or Segment-Table Origin */
		unsigned long	 : 2;
		unsigned long g  : 1; /* Subspace Group Control */
		unsigned long p  : 1; /* Private Space Control */
		unsigned long s  : 1; /* Storage-Alteration-Event Control */
		unsigned long x  : 1; /* Space-Switch-Event Control */
		unsigned long r  : 1; /* Real-Space Control */
		unsigned long	 : 1;
		unsigned long dt : 2; /* Designation-Type Control */
		unsigned long tl : 2; /* Region- or Segment-Table Length */
	};
};
enum {
	ASCE_TYPE_SEGMENT = 0,
	ASCE_TYPE_REGION3 = 1,
	ASCE_TYPE_REGION2 = 2,
	ASCE_TYPE_REGION1 = 3
};
union region1_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Second-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Second-Table Length */
	};
};
union region2_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Third-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Third-Table Length */
	};
};
struct region3_table_entry_fc0 {
	unsigned long sto: 52;/* Segment-Table Origin */
	unsigned long	 : 1;
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 1;
	unsigned long tf : 2; /* Segment-Table Offset */
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long tl : 2; /* Segment-Table Length */
};
struct region3_table_entry_fc1 {
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long	   : 14;
	unsigned long av   : 1;	 /* ACCF-Validity Control */
	unsigned long acc  : 4;	 /* Access-Control Bits */
	unsigned long f    : 1;	 /* Fetch-Protection Bit */
	unsigned long fc   : 1;	 /* Format-Control */
	unsigned long p    : 1;	 /* DAT-Protection Bit */
	unsigned long iep  : 1;	 /* Instruction-Execution-Protection */
	unsigned long	   : 2;
	unsigned long i    : 1;	 /* Region-Invalid Bit */
	unsigned long cr   : 1;	 /* Common-Region Bit */
	unsigned long tt   : 2;	 /* Table-Type Bits */
	unsigned long	   : 2;
};
union region3_table_entry {
	unsigned long val;
	struct region3_table_entry_fc0 fc0;
	struct region3_table_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long cr : 1; /* Common-Region Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};
struct segment_entry_fc0 {
	unsigned long pto: 53;/* Page-Table Origin */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 3;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};
struct segment_entry_fc1 {
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long	   : 3;
	unsigned long av   : 1;	 /* ACCF-Validity Control */
	unsigned long acc  : 4;	 /* Access-Control Bits */
	unsigned long f    : 1;	 /* Fetch-Protection Bit */
	unsigned long fc   : 1;	 /* Format-Control */
	unsigned long p    : 1;	 /* DAT-Protection Bit */
	unsigned long iep  : 1;	 /* Instruction-Execution-Protection */
	unsigned long	   : 2;
	unsigned long i    : 1;	 /* Segment-Invalid Bit */
	unsigned long cs   : 1;	 /* Common-Segment Bit */
	unsigned long tt   : 2;	 /* Table-Type Bits */
	unsigned long	   : 2;
};
union segment_table_entry {
	unsigned long val;
	struct segment_entry_fc0 fc0;
	struct segment_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Segment-Invalid Bit */
		unsigned long cs : 1; /* Common-Segment Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};
enum {
	TABLE_TYPE_SEGMENT = 0,
	TABLE_TYPE_REGION3 = 1,
	TABLE_TYPE_REGION2 = 2,
	TABLE_TYPE_REGION1 = 3
};
union page_table_entry {
	unsigned long val;
	struct {
		unsigned long pfra : 52; /* Page-Frame Real Address */
		unsigned long z    : 1;	 /* Zero Bit */
		unsigned long i    : 1;	 /* Page-Invalid Bit */
		unsigned long p    : 1;	 /* DAT-Protection Bit */
		unsigned long iep  : 1;	 /* Instruction-Execution-Protection */
		unsigned long	   : 8;
	};
};
/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx  : 11;
		unsigned long px  : 8;
		unsigned long bx  : 12;
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long	    : 9;
		unsigned long rsx01 : 2;
		unsigned long	    : 9;
		unsigned long rtx01 : 2;
		unsigned long	    : 9;
		unsigned long sx01  : 2;
		unsigned long	    : 29;
	};
};
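/*
 * Example (illustrative only, address hand-picked): with the union above,
 * the virtual address 0x12345678 decodes as rfx = rsx = rtx = 0,
 * sx = 0x123, px = 0x45 and bx = 0x678, because the index fields simply
 * overlay the 11/11/11/11/8/12 bit split of the 64-bit address:
 *
 *	union vaddress vaddr = { .addr = 0x12345678UL };
 *	// vaddr.sx == 0x123, vaddr.px == 0x45, vaddr.bx == 0x678
 */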
/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members are used to
 * simply assign them the value of a region, segment or page table entry.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};
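/*
 * Illustrative sketch of the assignment trick described above (no extra
 * functionality): raddr is seeded with the virtual address, and assigning
 * e.g. the pfra member from a page table entry replaces only the upper
 * 52 bits, so the 12-bit byte index of the original address survives in
 * the low bits:
 *
 *	union raddress raddr = { .addr = gva };
 *	raddr.pfra = pte.pfra;	// bits 52-63 (byte index) of gva are kept
 */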
union alet {
	u32 val;
	struct {
		u32 reserved : 7;
		u32 p	     : 1;
		u32 alesn    : 8;
		u32 alen     : 16;
	};
};

union ald {
	u32 val;
	struct {
		u32	: 1;
		u32 alo : 28;
		u32 all : 3;
	};
};

struct ale {
	unsigned long i      : 1;  /* ALEN-Invalid Bit */
	unsigned long	     : 5;
	unsigned long fo     : 1;  /* Fetch-Only Bit */
	unsigned long p      : 1;  /* Private Bit */
	unsigned long alesn  : 8;  /* Access-List-Entry Sequence Number */
	unsigned long aleax  : 16; /* Access-List-Entry Authorization Index */
	unsigned long	     : 32;
	unsigned long	     : 1;
	unsigned long asteo  : 25; /* ASN-Second-Table-Entry Origin */
	unsigned long	     : 6;
	unsigned long astesn : 32; /* ASTE Sequence Number */
};
struct aste {
	unsigned long i      : 1;  /* ASX-Invalid Bit */
	unsigned long ato    : 29; /* Authority-Table Origin */
	unsigned long	     : 1;
	unsigned long b      : 1;  /* Base-Space Bit */
	unsigned long ax     : 16; /* Authorization Index */
	unsigned long atl    : 12; /* Authority-Table Length */
	unsigned long	     : 2;
	unsigned long ca     : 1;  /* Controlled-ASN Bit */
	unsigned long ra     : 1;  /* Reusable-ASN Bit */
	unsigned long asce   : 64; /* Address-Space-Control Element */
	unsigned long ald    : 32;
	unsigned long astesn : 32;
	/* .. more fields there */
};
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & ECA_SII) {
		int rc;

		read_lock(&vcpu->kvm->arch.sca_lock);
		rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
		read_unlock(&vcpu->kvm->arch.sca_lock);
		return rc;
	}
	return vcpu->kvm->arch.ipte_lock_count != 0;
}
static void ipte_lock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count++;
	if (vcpu->kvm->arch.ipte_lock_count > 1)
		goto out;
retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.k) {
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count--;
	if (vcpu->kvm->arch.ipte_lock_count)
		goto out;
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	wake_up(&vcpu->kvm->arch.ipte_wq);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
static void ipte_lock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.kg) {
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	if (!new.kh)
		wake_up(&vcpu->kvm->arch.ipte_wq);
}
void ipte_lock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & ECA_SII)
		ipte_lock_siif(vcpu);
	else
		ipte_lock_simple(vcpu);
}
void ipte_unlock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & ECA_SII)
		ipte_unlock_siif(vcpu);
	else
		ipte_unlock_simple(vcpu);
}
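/*
 * Usage sketch (hypothetical caller): translations that must be stable
 * against the guest's IPTE/IDTE interlock are bracketed by
 * ipte_lock()/ipte_unlock(), which transparently pick the siif or simple
 * variant depending on ECA_SII; see check_gva_range() below for a real
 * caller following this pattern.
 *
 *	ipte_lock(vcpu);
 *	rc = guest_translate_address(vcpu, gva, ar, &gpa, GACC_FETCH);
 *	ipte_unlock(vcpu);
 */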
static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
			  enum gacc_mode mode)
{
	union alet alet;
	struct ale ale;
	struct aste aste;
	unsigned long ald_addr, authority_table_addr;
	union ald ald;
	int eax, rc;
	u8 authority_table;

	if (ar >= NUM_ACRS)
		return -EINVAL;

	save_access_regs(vcpu->run->s.regs.acrs);
	alet.val = vcpu->run->s.regs.acrs[ar];

	if (ar == 0 || alet.val == 0) {
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	} else if (alet.val == 1) {
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	}

	if (alet.reserved)
		return PGM_ALET_SPECIFICATION;

	if (alet.p)
		ald_addr = vcpu->arch.sie_block->gcr[5];
	else
		ald_addr = vcpu->arch.sie_block->gcr[2];
	ald_addr &= 0x7fffffc0;

	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
	if (rc)
		return rc;

	if (alet.alen / 8 > ald.all)
		return PGM_ALEN_TRANSLATION;

	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
		return PGM_ADDRESSING;

	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
			     sizeof(struct ale));
	if (rc)
		return rc;

	if (ale.i == 1)
		return PGM_ALEN_TRANSLATION;
	if (ale.alesn != alet.alesn)
		return PGM_ALE_SEQUENCE;

	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
	if (rc)
		return rc;

	if (aste.i)
		return PGM_ASTE_VALIDITY;
	if (aste.astesn != ale.astesn)
		return PGM_ASTE_SEQUENCE;

	if (ale.p == 1) {
		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
		if (ale.aleax != eax) {
			if (eax / 16 > aste.atl)
				return PGM_EXTENDED_AUTHORITY;

			authority_table_addr = aste.ato * 4 + eax / 4;

			rc = read_guest_real(vcpu, authority_table_addr,
					     &authority_table,
					     sizeof(u8));
			if (rc)
				return rc;

			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
				return PGM_EXTENDED_AUTHORITY;
		}
	}

	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
	return 0;
}
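/*
 * Worked example of the ALET special cases handled above (sketch only,
 * register number chosen arbitrarily): an access register containing 0
 * selects the primary ASCE (CR1), the value 1 selects the secondary ASCE
 * (CR7), and any other value goes through the full access-list lookup.
 *
 *	union asce asce;
 *	vcpu->run->s.regs.acrs[5] = 0;
 *	rc = ar_translation(vcpu, &asce, 5, GACC_FETCH);
 *	// rc == 0 and asce.val == vcpu->arch.sie_block->gcr[1]
 */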
struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi  : 2;	 /* Access Exception Fetch/Store Indication */
	unsigned long	   : 2;
	unsigned long b56  : 1;
	unsigned long	   : 3;
	unsigned long b60  : 1;
	unsigned long b61  : 1;
	unsigned long as   : 2;	 /* ASCE Identifier */
};

enum {
	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
	FSI_STORE   = 1, /* Exception was due to store operation */
	FSI_FETCH   = 2	 /* Exception was due to fetch operation */
};
static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
		     u8 ar, enum gacc_mode mode, enum prot_type prot)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	struct trans_exc_code_bits *tec;

	memset(pgm, 0, sizeof(*pgm));
	pgm->code = code;
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;

	switch (code) {
	case PGM_PROTECTION:
		switch (prot) {
		case PROT_TYPE_IEP:
			tec->b61 = 1;
			/* FALL THROUGH */
		case PROT_TYPE_LA:
			tec->b56 = 1;
			break;
		case PROT_TYPE_KEYC:
			tec->b60 = 1;
			break;
		case PROT_TYPE_ALC:
			tec->b60 = 1;
			/* FALL THROUGH */
		case PROT_TYPE_DAT:
			tec->b61 = 1;
			break;
		}
		/* FALL THROUGH */
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		/*
		 * op_access_id only applies to MOVE_PAGE -> set bit 61
		 * exc_access_id has to be set to 0 for some instructions. Both
		 * cases have to be handled by the caller.
		 */
		tec->addr = gva >> PAGE_SHIFT;
		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
		tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
		/* FALL THROUGH */
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_ASTE_SEQUENCE:
	case PGM_EXTENDED_AUTHORITY:
		/*
		 * We can always store exc_access_id, as it is
		 * undefined for non-ar cases. It is undefined for
		 * most DAT protection exceptions.
		 */
		pgm->exc_access_id = ar;
		break;
	}
	return code;
}
static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
			 unsigned long ga, u8 ar, enum gacc_mode mode)
{
	int rc;
	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);

	if (!psw.dat) {
		asce->val = 0;
		asce->r = 1;
		return 0;
	}

	if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME))
		psw.as = PSW_BITS_AS_PRIMARY;

	switch (psw.as) {
	case PSW_BITS_AS_PRIMARY:
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	case PSW_BITS_AS_SECONDARY:
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	case PSW_BITS_AS_HOME:
		asce->val = vcpu->arch.sie_block->gcr[13];
		return 0;
	case PSW_BITS_AS_ACCREG:
		rc = ar_translation(vcpu, asce, ar, mode);
		if (rc > 0)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
		return rc;
	}
	return 0;
}
static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}
/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @mode: indicates the access mode to be used
 * @prot: returns the type for protection exceptions
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	      page tables (-EFAULT)
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
				     enum gacc_mode mode, enum prot_type *prot)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	int iep_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2, iep;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
	iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
	if (asce.r)
		goto real_address;
	ptr = asce.origin * PAGE_SIZE;
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (edat1)
			dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (edat1)
			dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			iep_protection = rtte.fc1.iep;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (edat1)
			dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			iep_protection = ste.fc1.iep;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
	}
	}
	if (kvm_is_error_gpa(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	iep_protection = pte.iep;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (mode == GACC_STORE && dat_protection) {
		*prot = PROT_TYPE_DAT;
		return PGM_PROTECTION;
	}
	if (mode == GACC_IFETCH && iep_protection && iep) {
		*prot = PROT_TYPE_IEP;
		return PGM_PROTECTION;
	}
	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}
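/*
 * Minimal usage sketch (hypothetical caller; the real callers are
 * guest_page_range() and guest_translate_address() below): translate one
 * address and convert a positive return value into a program
 * interruption via trans_exc().
 *
 *	enum prot_type prot;
 *	unsigned long gpa;
 *	int rc = guest_translate(vcpu, gva, &gpa, asce, GACC_FETCH, &prot);
 *	if (rc > 0)
 *		rc = trans_exc(vcpu, rc, gva, ar, GACC_FETCH, prot);
 */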
static inline int is_low_address(unsigned long ga)
{
	/* Check for address ranges 0..511 and 4096..4607 */
	return (ga & ~0x11fful) == 0;
}
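/*
 * Worked example for the mask above: ~0x11ff masks out bits 0-8 and
 * bit 12, so the AND is zero exactly when no other address bit is set,
 * i.e. for 0x0000-0x01ff and 0x1000-0x11ff.
 *
 *	is_low_address(0x0100);	// 1: within the first 512 bytes of page 0
 *	is_low_address(0x1100);	// 1: within the first 512 bytes of page 1
 *	is_low_address(0x0200);	// 0: bit 9 set, outside both ranges
 */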
static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
					  const union asce asce)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (!ctlreg0.lap)
		return 0;
	if (psw_bits(*psw).dat && asce.p)
		return 0;
	return 1;
}
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			    unsigned long *pages, unsigned long nr_pages,
			    const union asce asce, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	int lap_enabled, rc = 0;
	enum prot_type prot;

	lap_enabled = low_address_protection_enabled(vcpu, asce);
	while (nr_pages) {
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
					 PROT_TYPE_LA);
		ga &= PAGE_MASK;
		if (psw_bits(*psw).dat) {
			rc = guest_translate(vcpu, ga, pages, asce, mode, &prot);
			if (rc < 0)
				return rc;
		} else {
			*pages = kvm_s390_real_to_abs(vcpu, ga);
			if (kvm_is_error_gpa(vcpu->kvm, *pages))
				rc = PGM_ADDRESSING;
		}
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, prot);
		ga += PAGE_SIZE;
		pages++;
		nr_pages--;
	}
	return 0;
}
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
		 unsigned long len, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long _len, nr_pages, gpa, idx;
	unsigned long pages_array[2];
	unsigned long *pages;
	int need_ipte_lock;
	union asce asce;
	int rc;

	if (!len)
		return 0;
	ga = kvm_s390_logical_to_effective(vcpu, ga);
	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
	if (rc)
		return rc;
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	pages = pages_array;
	if (nr_pages > ARRAY_SIZE(pages_array))
		pages = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
	if (!pages)
		return -ENOMEM;
	need_ipte_lock = psw_bits(*psw).dat && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu);
	rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
	for (idx = 0; idx < nr_pages && !rc; idx++) {
		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (mode == GACC_STORE)
			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
		else
			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
		len -= _len;
		ga += _len;
		data += _len;
	}
	if (need_ipte_lock)
		ipte_unlock(vcpu);
	if (nr_pages > ARRAY_SIZE(pages_array))
		vfree(pages);
	return rc;
}
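/*
 * Usage sketch (hypothetical values): copy 8 bytes from the guest's
 * logical address space into a host buffer, honouring DAT translation
 * and the IPTE interlock.
 *
 *	u64 val;
 *	rc = access_guest(vcpu, ga, ar, &val, sizeof(val), GACC_FETCH);
 *	if (rc)	// > 0: pgm data set up by trans_exc(), < 0: host error
 *		return rc;
 */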
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode)
{
	unsigned long _len, gpa;
	int rc = 0;

	while (len && !rc) {
		gpa = kvm_s390_real_to_abs(vcpu, gra);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (mode)
			rc = write_guest_abs(vcpu, gpa, data, _len);
		else
			rc = read_guest_abs(vcpu, gpa, data, _len);
		len -= _len;
		gra += _len;
		data += _len;
	}
	return rc;
}
/**
 * guest_translate_address - translate guest logical into guest absolute address
 *
 * Parameter semantics are the same as the ones from guest_translate.
 * The memory contents at the guest address are not changed.
 *
 * Note: The IPTE lock is not taken during this function, so the caller
 * has to take care of this.
 */
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
			    unsigned long *gpa, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	enum prot_type prot;
	union asce asce;
	int rc;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
		if (mode == GACC_STORE)
			return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
					 mode, PROT_TYPE_LA);
	}

	if (psw_bits(*psw).dat && !asce.r) {	/* Use DAT? */
		rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot);
		if (rc > 0)
			return trans_exc(vcpu, rc, gva, 0, mode, prot);
	} else {
		*gpa = kvm_s390_real_to_abs(vcpu, gva);
		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
			return trans_exc(vcpu, PGM_ADDRESSING, gva, 0, mode, 0);
	}

	return rc;
}
/**
 * check_gva_range - test a range of guest virtual addresses for accessibility
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
		    unsigned long length, enum gacc_mode mode)
{
	unsigned long gpa;
	unsigned long currlen;
	int rc = 0;

	ipte_lock(vcpu);
	while (length > 0 && !rc) {
		currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
		rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
		gva += currlen;
		length -= currlen;
	}
	ipte_unlock(vcpu);

	return rc;
}
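/*
 * Sketch of a typical caller (hypothetical): probe a guest buffer before
 * starting a long-running operation and inject the program interruption
 * prepared by trans_exc() if the range is not accessible.
 *
 *	rc = check_gva_range(vcpu, gva, ar, len, GACC_STORE);
 *	if (rc)
 *		return kvm_s390_inject_prog_cond(vcpu, rc);
 */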
/**
 * kvm_s390_check_low_addr_prot_real - check for low-address protection
 * @vcpu: virtual cpu
 * @gra: Guest real address
 *
 * Checks whether an address is subject to low-address protection and sets
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Returns: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};

	if (!ctlreg0.lap || !is_low_address(gra))
		return 0;
	return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
}
/**
 * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: pointer to the page table address result
 * @dat_protection: accumulates the DAT protection bits of the walked tables
 * @fake: pgt references contiguous guest memory block, not a pgtable
 */
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
				  unsigned long *pgt, int *dat_protection,
				  int *fake)
{
	struct gmap *parent;
	union asce asce;
	union vaddress vaddr;
	unsigned long ptr;
	int rc;

	*fake = 0;
	*dat_protection = 0;
	parent = sg->parent;
	vaddr.addr = saddr;
	asce.val = sg->orig_asce;
	ptr = asce.origin * PAGE_SIZE;
	if (asce.r) {
		*fake = 1;
		ptr = 0;
		asce.dt = ASCE_TYPE_REGION1;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl && !*fake)
			return PGM_REGION_FIRST_TRANS;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		break;
	}

	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (*fake) {
			ptr += vaddr.rfx * _REGION1_SIZE;
			rfte.val = ptr;
			goto shadow_r2t;
		}
		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
		if (rc)
			return rc;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE;
shadow_r2t:
		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
		if (rc)
			return rc;
	}
		/* fallthrough */
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (*fake) {
			ptr += vaddr.rsx * _REGION2_SIZE;
			rste.val = ptr;
			goto shadow_r3t;
		}
		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
		if (rc)
			return rc;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE;
shadow_r3t:
		rste.p |= *dat_protection;
		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
		if (rc)
			return rc;
	}
		/* fallthrough */
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (*fake) {
			ptr += vaddr.rtx * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
		if (rc)
			return rc;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && sg->edat_level >= 2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && sg->edat_level >= 2) {
			*dat_protection |= rtte.fc0.p;
			*fake = 1;
			ptr = rtte.fc1.rfaa * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (sg->edat_level >= 1)
			*dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE;
shadow_sgt:
		rtte.fc0.p |= *dat_protection;
		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
		if (rc)
			return rc;
	}
		/* fallthrough */
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (*fake) {
			ptr += vaddr.sx * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
		if (rc)
			return rc;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		*dat_protection |= ste.fc0.p;
		if (ste.fc && sg->edat_level >= 1) {
			*fake = 1;
			ptr = ste.fc1.sfaa * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		ptr = ste.fc0.pto * (PAGE_SIZE / 2);
shadow_pgt:
		ste.fc0.p |= *dat_protection;
		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
		if (rc)
			return rc;
	}
	}
	/* Return the parent address of the page table */
	*pgt = ptr;
	return 0;
}
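/*
 * Illustrative call sketch (hypothetical; the real caller is
 * kvm_s390_shadow_fault() below). Note how *dat_protection accumulates
 * the protection bits of all visited levels, and each shadowed entry is
 * written back with the accumulated bit.
 *
 *	int dat_protection, fake;
 *	unsigned long pgt;
 *	rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection, &fake);
 *	// on success, pgt holds the guest address of the page table (or of
 *	// a contiguous guest memory block when fake == 1)
 */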
/**
 * kvm_s390_shadow_fault - handle fault on a shadow page table
 * @vcpu: virtual cpu
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 *
 * Returns: - 0 if the shadow fault was successfully resolved
 *	    - > 0 (pgm exception code) on exceptions while faulting
 *	    - -EAGAIN if the caller can retry immediately
 *	    - -EFAULT when accessing invalid guest addresses
 *	    - -ENOMEM if out of memory
 */
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
			  unsigned long saddr)
{
	union vaddress vaddr;
	union page_table_entry pte;
	unsigned long pgt;
	int dat_protection, fake;
	int rc;

	down_read(&sg->mm->mmap_sem);
	/*
	 * We don't want any guest-2 tables to change - so the parent
	 * tables/pointers we read stay valid - unshadowing is however
	 * always possible - only guest_table_lock protects us.
	 */
	ipte_lock(vcpu);

	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
					    &fake);

	vaddr.addr = saddr;
	if (fake) {
		pte.val = pgt + vaddr.px * PAGE_SIZE;
		goto shadow_page;
	}
	if (!rc)
		rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
	if (!rc && pte.i)
		rc = PGM_PAGE_TRANSLATION;
	if (!rc && pte.z)
		rc = PGM_TRANSLATION_SPEC;
shadow_page:
	pte.p |= dat_protection;
	if (!rc)
		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
	ipte_unlock(vcpu);
	up_read(&sg->mm->mmap_sem);
	return rc;
}
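/*
 * Usage sketch (hypothetical; the real caller lives in the VSIE code,
 * and inject_fault() here stands in for its error handling): resolve a
 * fault on the shadow gmap and either retry or inject depending on the
 * result.
 *
 *	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, saddr);
 *	if (rc > 0)
 *		rc = inject_fault(vcpu, rc, saddr, is_write);
 */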