/*
 * guest access functions
 *
 * Copyright IBM Corp. 2014
 */

#include <linux/vmalloc.h>
#include <linux/err.h>
#include <asm/pgtable.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include <asm/switch_to.h>
union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* Region- or Segment-Table Origin */
		unsigned long	 : 2;
		unsigned long g  : 1; /* Subspace Group Control */
		unsigned long p  : 1; /* Private Space Control */
		unsigned long s  : 1; /* Storage-Alteration-Event Control */
		unsigned long x  : 1; /* Space-Switch-Event Control */
		unsigned long r  : 1; /* Real-Space Control */
		unsigned long	 : 1;
		unsigned long dt : 2; /* Designation-Type Control */
		unsigned long tl : 2; /* Region- or Segment-Table Length */
	};
};
enum {
	ASCE_TYPE_SEGMENT = 0,
	ASCE_TYPE_REGION3 = 1,
	ASCE_TYPE_REGION2 = 2,
	ASCE_TYPE_REGION1 = 3
};
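/*
 * The designation type selects the level at which the DAT walk in
 * guest_translate() below starts: a segment-table asce covers 2 GB,
 * and each additional region-table level widens the addressable range
 * by another 11 index bits (see union vaddress further down).
 */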
union region1_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Second-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Second-Table Length */
	};
};
union region2_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Third-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Third-Table Length */
	};
};
struct region3_table_entry_fc0 {
	unsigned long sto: 52;/* Segment-Table Origin */
	unsigned long	 : 1;
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 1;
	unsigned long tf : 2; /* Segment-Table Offset */
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long tl : 2; /* Segment-Table Length */
};
struct region3_table_entry_fc1 {
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long	 : 14;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc: 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long	 : 2;
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};
union region3_table_entry {
	unsigned long val;
	struct region3_table_entry_fc0 fc0;
	struct region3_table_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long cr : 1; /* Common-Region Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};
struct segment_entry_fc0 {
	unsigned long pto: 53;/* Page-Table Origin */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 1;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 4;
};
struct segment_entry_fc1 {
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long	 : 3;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc: 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long	 : 1;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 3;
};
union segment_table_entry {
	unsigned long val;
	struct segment_entry_fc0 fc0;
	struct segment_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 1;
		unsigned long i  : 1; /* Segment-Invalid Bit */
		unsigned long cs : 1; /* Common-Segment Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 5;
	};
};
enum {
	TABLE_TYPE_SEGMENT = 0,
	TABLE_TYPE_REGION3 = 1,
	TABLE_TYPE_REGION2 = 2,
	TABLE_TYPE_REGION1 = 3
};
union page_table_entry {
	unsigned long val;
	struct {
		unsigned long pfra : 52; /* Page-Frame Real Address */
		unsigned long z  : 1; /* Zero Bit */
		unsigned long i  : 1; /* Page-Invalid Bit */
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long co : 1; /* Change-Recording Override */
		unsigned long	 : 8;
	};
};
/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx  : 11;
		unsigned long px  : 8;
		unsigned long bx  : 12;
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long	    : 9;
		unsigned long rsx01 : 2;
		unsigned long	    : 9;
		unsigned long rtx01 : 2;
		unsigned long	    : 9;
		unsigned long sx01  : 2;
		unsigned long	    : 29;
	};
};
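/*
 * Example: the index fields split a 64-bit virtual address as
 * rfx(11) | rsx(11) | rtx(11) | sx(11) | px(8) | bx(12). Every index
 * selects an 8-byte table entry, hence computations of the form
 * "ptr += vaddr.rfx * 8" during the walk. The rfx01/rsx01/rtx01/sx01
 * overlays expose just the two most significant bits of each index,
 * which is all the table-length (tl) comparisons need.
 */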
/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members are used to
 * simply assign them the value of a region, segment or page table entry.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};
union alet {
	u32 val;
	struct {
		u32 reserved : 7;
		u32 p        : 1;
		u32 alesn    : 8;
		u32 alen     : 16;
	};
};

union ald {
	u32 val;
	struct {
		u32     : 1;
		u32 alo : 24;
		u32 all : 7;
	};
};

struct ale {
	unsigned long i      : 1; /* ALEN-Invalid Bit */
	unsigned long        : 5;
	unsigned long fo     : 1; /* Fetch-Only Bit */
	unsigned long p      : 1; /* Private Bit */
	unsigned long alesn  : 8; /* Access-List-Entry Sequence Number */
	unsigned long aleax  : 16; /* Access-List-Entry Authorization Index */
	unsigned long        : 32;
	unsigned long        : 1;
	unsigned long asteo  : 25; /* ASN-Second-Table-Entry Origin */
	unsigned long        : 6;
	unsigned long astesn : 32; /* ASTE Sequence Number */
};

struct aste {
	unsigned long i      : 1; /* ASX-Invalid Bit */
	unsigned long ato    : 29; /* Authority-Table Origin */
	unsigned long        : 1;
	unsigned long b      : 1; /* Base-Space Bit */
	unsigned long ax     : 16; /* Authorization Index */
	unsigned long atl    : 12; /* Authority-Table Length */
	unsigned long        : 2;
	unsigned long ca     : 1; /* Controlled-ASN Bit */
	unsigned long ra     : 1; /* Reusable-ASN Bit */
	unsigned long asce   : 64; /* Address-Space-Control Element */
	unsigned long ald    : 32;
	unsigned long astesn : 32;
	/* .. more fields there */
};
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & 1) {
		int rc;

		read_lock(&vcpu->kvm->arch.sca_lock);
		rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
		read_unlock(&vcpu->kvm->arch.sca_lock);
		return rc;
	}
	return vcpu->kvm->arch.ipte_lock_count != 0;
}
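/*
 * The IPTE lock comes in two flavors. If the SIIF facility is in use
 * (eca & 1), the lock lives in the ipte control of the system control
 * area, where the hardware honors it as well; otherwise only the host
 * needs serialization, and a simple counter protected by ipte_mutex
 * is sufficient.
 */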
static void ipte_lock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count++;
	if (vcpu->kvm->arch.ipte_lock_count > 1)
		goto out;
retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.k) {
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count--;
	if (vcpu->kvm->arch.ipte_lock_count)
		goto out;
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	wake_up(&vcpu->kvm->arch.ipte_wq);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
static void ipte_lock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.kg) {
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	if (!new.kh)
		wake_up(&vcpu->kvm->arch.ipte_wq);
}
void ipte_lock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & 1)
		ipte_lock_siif(vcpu);
	else
		ipte_lock_simple(vcpu);
}
void ipte_unlock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & 1)
		ipte_unlock_siif(vcpu);
	else
		ipte_unlock_simple(vcpu);
}
static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
			  enum gacc_mode mode)
{
	union alet alet;
	struct ale ale;
	struct aste aste;
	unsigned long ald_addr, authority_table_addr;
	union ald ald;
	int eax, rc;
	u8 authority_table;

	if (ar >= NUM_ACRS)
		return -EINVAL;

	save_access_regs(vcpu->run->s.regs.acrs);
	alet.val = vcpu->run->s.regs.acrs[ar];

	if (ar == 0 || alet.val == 0) {
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	} else if (alet.val == 1) {
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	}

	if (alet.reserved)
		return PGM_ALET_SPECIFICATION;

	if (alet.p)
		ald_addr = vcpu->arch.sie_block->gcr[5];
	else
		ald_addr = vcpu->arch.sie_block->gcr[2];
	ald_addr &= 0x7fffffc0;

	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
	if (rc)
		return rc;

	if (alet.alen / 8 > ald.all)
		return PGM_ALEN_TRANSLATION;

	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
		return PGM_ADDRESSING;

	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
			     sizeof(struct ale));
	if (rc)
		return rc;

	if (ale.i == 1)
		return PGM_ALEN_TRANSLATION;
	if (ale.alesn != alet.alesn)
		return PGM_ALE_SEQUENCE;

	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
	if (rc)
		return rc;

	if (aste.i)
		return PGM_ASTE_VALIDITY;
	if (aste.astesn != ale.astesn)
		return PGM_ASTE_SEQUENCE;

	if (ale.p == 1) {
		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
		if (ale.aleax != eax) {
			if (eax / 16 > aste.atl)
				return PGM_EXTENDED_AUTHORITY;

			authority_table_addr = aste.ato * 4 + eax / 4;

			rc = read_guest_real(vcpu, authority_table_addr,
					     &authority_table,
					     sizeof(u8));
			if (rc)
				return rc;

			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
				return PGM_EXTENDED_AUTHORITY;
		}
	}

	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
	return 0;
}
struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi  : 2;  /* Access Exception Fetch/Store Indication */
	unsigned long	   : 6;
	unsigned long b60  : 1;
	unsigned long b61  : 1;
	unsigned long as   : 2;  /* ASCE Identifier */
};

enum {
	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
	FSI_STORE   = 1, /* Exception was due to store operation */
	FSI_FETCH   = 2  /* Exception was due to fetch operation */
};
static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
			 ar_t ar, enum gacc_mode mode)
{
	int rc;
	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	struct trans_exc_code_bits *tec_bits;

	memset(pgm, 0, sizeof(*pgm));
	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	tec_bits->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
	tec_bits->as = psw.as;

	if (!psw.t) {
		asce->val = 0;
		asce->r = 1;
		return 0;
	}

	if (mode == GACC_IFETCH)
		psw.as = psw.as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY;

	switch (psw.as) {
	case PSW_AS_PRIMARY:
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	case PSW_AS_SECONDARY:
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	case PSW_AS_HOME:
		asce->val = vcpu->arch.sie_block->gcr[13];
		return 0;
	case PSW_AS_ACCREG:
	default:
		rc = ar_translation(vcpu, asce, ar, mode);
		switch (rc) {
		case PGM_ALEN_TRANSLATION:
		case PGM_ALE_SEQUENCE:
		case PGM_ASTE_VALIDITY:
		case PGM_ASTE_SEQUENCE:
		case PGM_EXTENDED_AUTHORITY:
			vcpu->arch.pgm.exc_access_id = ar;
			break;
		}
		return rc;
	}
}
static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}
/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @mode: indicates the access mode to be used
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	      prefix mapping
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
				     enum gacc_mode mode)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
	if (asce.r)
		goto real_address;
	ptr = asce.origin * 4096;
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		dat_protection |= rfte.p;
		ptr = rfte.rto * 4096 + vaddr.rsx * 8;
	}
		/* fall through to the next lower table level */
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		dat_protection |= rste.p;
		ptr = rste.rto * 4096 + vaddr.rtx * 8;
	}
		/* fall through */
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8;
	}
		/* fall through */
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * 2048 + vaddr.px * 8;
	}
	}
	if (kvm_is_error_gpa(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	if (pte.co && !edat1)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (mode == GACC_STORE && dat_protection)
		return PGM_PROTECTION;
	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}
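/*
 * Note on the return convention of guest_translate(): callers like
 * guest_page_range() below propagate negative values (host errors)
 * directly, while positive values are stored in vcpu->arch.pgm.code so
 * that the corresponding access exception can be injected into the
 * guest by the interception handler.
 */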
static inline int is_low_address(unsigned long ga)
{
	/* Check for address ranges 0..511 and 4096..4607 */
	return (ga & ~0x11fful) == 0;
}
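/*
 * The single mask test works because 0x11ff has exactly the bits of
 * the two ranges set: an address with no bits outside 0x11ff is
 * necessarily in 0x0000-0x01ff (0..511) or 0x1000-0x11ff (4096..4607).
 */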
static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
					  const union asce asce)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (!ctlreg0.lap)
		return 0;
	if (psw_bits(*psw).t && asce.p)
		return 0;
	return 1;
}
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
			    unsigned long *pages, unsigned long nr_pages,
			    const union asce asce, enum gacc_mode mode)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct trans_exc_code_bits *tec_bits;
	int lap_enabled, rc;

	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	lap_enabled = low_address_protection_enabled(vcpu, asce);
	while (nr_pages) {
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		tec_bits->addr = ga >> PAGE_SHIFT;
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga)) {
			pgm->code = PGM_PROTECTION;
			return pgm->code;
		}
		ga &= PAGE_MASK;
		if (psw_bits(*psw).t) {
			rc = guest_translate(vcpu, ga, pages, asce, mode);
			if (rc < 0)
				return rc;
			if (rc == PGM_PROTECTION)
				tec_bits->b61 = 1;
			if (rc)
				pgm->code = rc;
		} else {
			*pages = kvm_s390_real_to_abs(vcpu, ga);
			if (kvm_is_error_gpa(vcpu->kvm, *pages))
				pgm->code = PGM_ADDRESSING;
		}
		if (pgm->code)
			return pgm->code;
		ga += PAGE_SIZE;
		pages++;
		nr_pages--;
	}
	return 0;
}
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
		 unsigned long len, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long _len, nr_pages, gpa, idx;
	unsigned long pages_array[2];
	unsigned long *pages;
	int need_ipte_lock;
	union asce asce;
	int rc;

	if (!len)
		return 0;
	rc = get_vcpu_asce(vcpu, &asce, ar, mode);
	if (rc)
		return rc;
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	pages = pages_array;
	if (nr_pages > ARRAY_SIZE(pages_array))
		pages = vmalloc(nr_pages * sizeof(unsigned long));
	if (!pages)
		return -ENOMEM;
	need_ipte_lock = psw_bits(*psw).t && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu);
	rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, mode);
	for (idx = 0; idx < nr_pages && !rc; idx++) {
		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (mode == GACC_STORE)
			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
		else
			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
		len -= _len;
		ga += _len;
		data += _len;
	}
	if (need_ipte_lock)
		ipte_unlock(vcpu);
	if (nr_pages > ARRAY_SIZE(pages_array))
		vfree(pages);
	return rc;
}
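/*
 * Illustrative use (a sketch, not taken from this file): an
 * instruction handler that wants to fetch a doubleword from guest
 * logical address "ga" would go through the read_guest() wrapper
 * declared in gaccess.h, which ends up in access_guest() above:
 *
 *	u64 val;
 *	int rc = read_guest(vcpu, ga, ar, &val, sizeof(val));
 *
 * A positive rc is a program interruption code to be injected, e.g.
 * via kvm_s390_inject_prog_cond(); a negative rc is a host error.
 */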
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode)
{
	unsigned long _len, gpa;
	int rc = 0;

	while (len && !rc) {
		gpa = kvm_s390_real_to_abs(vcpu, gra);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (mode)
			rc = write_guest_abs(vcpu, gpa, data, _len);
		else
			rc = read_guest_abs(vcpu, gpa, data, _len);
		len -= _len;
		gra += _len;
		data += _len;
	}
	return rc;
}
/**
 * guest_translate_address - translate guest logical into guest absolute address
 *
 * Parameter semantics are the same as the ones from guest_translate.
 * The memory contents at the guest address are not changed.
 *
 * Note: The IPTE lock is not taken during this function, so the caller
 * has to take care of this.
 */
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
			    unsigned long *gpa, enum gacc_mode mode)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct trans_exc_code_bits *tec;
	union asce asce;
	int rc;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	rc = get_vcpu_asce(vcpu, &asce, ar, mode);
	tec->addr = gva >> PAGE_SHIFT;
	if (rc)
		return rc;
	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
		if (mode == GACC_STORE) {
			rc = pgm->code = PGM_PROTECTION;
			return rc;
		}
	}

	if (psw_bits(*psw).t && !asce.r) {	/* Use DAT? */
		rc = guest_translate(vcpu, gva, gpa, asce, mode);
		if (rc > 0) {
			if (rc == PGM_PROTECTION)
				tec->b61 = 1;
			pgm->code = rc;
		}
	} else {
		rc = 0;
		*gpa = kvm_s390_real_to_abs(vcpu, gva);
		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
			rc = pgm->code = PGM_ADDRESSING;
	}

	return rc;
}
/**
 * check_gva_range - test a range of guest virtual addresses for accessibility
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
		    unsigned long length, enum gacc_mode mode)
{
	unsigned long gpa;
	unsigned long currlen;
	int rc = 0;

	ipte_lock(vcpu);
	while (length > 0 && !rc) {
		currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
		rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
		gva += currlen;
		length -= currlen;
	}
	ipte_unlock(vcpu);

	return rc;
}
/**
 * kvm_s390_check_low_addr_prot_real - check for low-address protection
 * @gra: Guest real address
 *
 * Checks whether an address is subject to low-address protection and sets
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct trans_exc_code_bits *tec_bits;
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};

	if (!ctlreg0.lap || !is_low_address(gra))
		return 0;

	memset(pgm, 0, sizeof(*pgm));
	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	tec_bits->fsi = FSI_STORE;
	tec_bits->as = psw_bits(*psw).as;
	tec_bits->addr = gra >> PAGE_SHIFT;
	pgm->code = PGM_PROTECTION;

	return pgm->code;
}