// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
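/*
 * Note on the helpers below: for a mm that is used by KVM, each 2K page
 * table is allocated together with a second 2K array of page status table
 * entries (PGSTEs) located at ptep + PTRS_PER_PTE. The PGSTE mirrors the
 * guest storage key state of the page and carries the notification bits
 * used by the guest mapping (gmap) code.
 */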
static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
	}
}

static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	}
}
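/*
 * ptep_flush_direct performs the IPTE right away, using the local variant
 * when no other CPU has the mm attached. ptep_flush_lazy below may instead
 * just mark the pte invalid and record a pending flush in flush_mm if only
 * the current CPU is attached, deferring the real TLB flush.
 */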
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep,
				      int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		ptep_ipte_local(mm, addr, ptep, nodat);
	else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep,
				    int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}
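/*
 * The PGSTE of a pte is locked by setting the PCL bit (0x0080 in the upper
 * halfword) with a compare-and-swap loop; pgste_get_lock/pgste_set_unlock
 * implement this spinlock-like protocol directly in inline assembly.
 */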
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
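/*
 * pgste_update_all transfers the real storage key of a mapped page into
 * the PGSTE: changed/referenced go to the guest GC/GR bits, the access
 * key and fetch protection bit are copied as well. pgste_set_key goes the
 * other way and writes the key cached in the PGSTE back to the real page.
 */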
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}
static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}

static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}
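/*
 * ptep_xchg_start/ptep_xchg_commit bracket a pte exchange: start takes the
 * PGSTE lock and delivers pending notifications, commit updates the key
 * and user-dirty tracking and installs the new pte under the same lock.
 */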
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep,
				     pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
	return old;
}
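/*
 * Usage sketch (hypothetical caller): atomically invalidate a pte and
 * retrieve its old value with an immediate TLB flush:
 *
 *	pte_t old = ptep_xchg_direct(mm, addr, ptep, __pte(_PAGE_INVALID));
 *
 * ptep_xchg_lazy below has the same contract but may defer the flush.
 */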
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_direct(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);
pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);

void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (!MACHINE_HAS_NX)
		pte_val(pte) &= ~_PAGE_NOEXEC;
	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);
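/*
 * The pmd/pud helpers below mirror the pte helpers one and two table
 * levels up, using IDTE instead of IPTE. For 1M huge pages backing a
 * guest, the gmap shadow structures are notified as well.
 */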
static inline void pmdp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
		gmap_pmdp_idte_local(mm, addr);
}

static inline void pmdp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST) {
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
	} else if (MACHINE_HAS_IDTE) {
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
	} else {
		__pmdp_csp(pmdp);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_csp(mm, addr);
	}
}
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pmdp_idte_local(mm, addr, pmdp);
	else
		pmdp_idte_global(mm, addr, pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			gmap_pmdp_invalidate(mm, addr);
	} else {
		pmdp_idte_global(mm, addr, pmdp);
	}
	atomic_dec(&mm->context.flush_count);
	return old;
}
static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);
	return pmd;
}
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);
static inline void pudp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
}

static inline void pudp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
	else
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * re-use _pmd_csp() here
		 */
		__pmdp_csp((pmd_t *) pudp);
}
static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pudp_idte_local(mm, addr, pudp);
	else
		pudp_idte_global(mm, addr, pudp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	*pudp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);
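/*
 * For transparent huge pages the pre-allocated page table of a pmd is
 * stashed in a list headed by pmd_huge_pte() so that it can be
 * reinstalled when the huge pmd is split; the deposit/withdraw pair
 * below manages that list.
 */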
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
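/*
 * Everything from here to the end of the file is only needed when the
 * kernel can run KVM guests, i.e. when page status tables exist.
 */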
#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p, nodat;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		pgste = pgste_update_all(entry, pgste, mm);
		pte_val(entry) |= _PAGE_INVALID;
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		pte_val(entry) &= ~_PAGE_INVALID;
		pte_val(entry) |= _PAGE_PROTECT;
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}
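/*
 * ptep_shadow_pte/ptep_unshadow_pte maintain the shadow page tables used
 * for VSIE (nested virtualization): the target pte is derived from the
 * source pte with the protection bit widened as requested, and the source
 * PGSTE is marked with PGSTE_VSIE_BIT so that later changes to the source
 * invalidate the shadow.
 */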
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
				(pte_val(pte) & _PAGE_PROTECT);
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}

void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;
	int nodat;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	ptep_flush_direct(mm, saddr, ptep, nodat);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}
static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key ACC and F, but set R/C */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/*
 * Test and reset if a guest page is dirty
 */
bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	bool dirty;
	int nodat;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
		ptep_ipte_global(mm, addr, ptep, nodat);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
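/*
 * The functions below emulate the guest view of the storage key of a
 * single page: the access-control bits and the fetch protection bit live
 * in the real key (and are cached in the PGSTE while the page is
 * unmapped), while the guest referenced/changed state is tracked in the
 * PGSTE GR/GC bits.
 */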
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul, paddr;
	spinlock_t *ptl;
	pgste_t old, new;
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_alloc_map(mm, addr);
	if (unlikely(!pmdp))
		return -EFAULT;

	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		spin_unlock(ptl);
		return -EFAULT;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		/*
		 * Huge pmds need quiescing operations, they are
		 * always mapped.
		 */
		page_set_storage_key(paddr, key, 1);
		spin_unlock(ptl);
		return 0;
	}
	spin_unlock(ptl);

	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long bits, skey;

		paddr = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(paddr);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(paddr, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
/**
 * Conditionally set a guest storage key (handling csske).
 * oldkey will be updated when either mr or mc is set and a pointer is given.
 *
 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 * storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc)
{
	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
	int rc;

	/* we can drop the pgste lock between getting and setting the key */
	if (mr | mc) {
		rc = get_guest_storage_key(current->mm, addr, &tmp);
		if (rc)
			return rc;
		if (oldkey)
			*oldkey = tmp;
		if (!mr)
			mask |= _PAGE_REFERENCED;
		if (!mc)
			mask |= _PAGE_CHANGED;
		if (!((tmp ^ key) & mask))
			return 0;
	}
	rc = set_guest_storage_key(current->mm, addr, key, nq);
	return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);
/**
 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	unsigned long paddr;
	pgste_t old, new;
	pmd_t *pmdp;
	pte_t *ptep;
	int cc = 0;

	pmdp = pmd_alloc_map(mm, addr);
	if (unlikely(!pmdp))
		return -EFAULT;

	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		spin_unlock(ptl);
		return -EFAULT;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		cc = page_reset_referenced(paddr);
		spin_unlock(ptl);
		return cc;
	}
	spin_unlock(ptl);

	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
	pgste_val(new) &= ~PGSTE_GR_BIT;

	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		paddr = pte_val(*ptep) & PAGE_MASK;
		cc = page_reset_referenced(paddr);
		/* Merge real referenced bit into host-set */
		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key)
{
	unsigned long paddr;
	spinlock_t *ptl;
	pgste_t pgste;
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_alloc_map(mm, addr);
	if (unlikely(!pmdp))
		return -EFAULT;

	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		/* Not yet mapped memory has a zero key */
		spin_unlock(ptl);
		*key = 0;
		return 0;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		*key = page_get_storage_key(paddr);
		spin_unlock(ptl);
		return 0;
	}
	spin_unlock(ptl);

	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	pgste = pgste_get_lock(ptep);
	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	paddr = pte_val(*ptep) & PAGE_MASK;
	if (!(pte_val(*ptep) & _PAGE_INVALID))
		*key = page_get_storage_key(paddr);
	/* Reflect guest's logical view, not physical */
	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);
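/*
 * ESSA (extract and set storage attributes) is the instruction behind
 * collaborative memory management (CMM); KVM calls pgste_perform_essa
 * below to apply the guest-requested page usage state to the PGSTE.
 */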
/**
 * pgste_perform_essa - perform ESSA actions on the PGSTE.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @orc: the specific action to perform, see the ESSA_SET_* macros.
 * @oldpte: the PTE will be saved there if the pointer is not NULL.
 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
 *
 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
 *	   or < 0 in case of error. -EINVAL is returned for invalid values
 *	   of orc, -EFAULT for invalid addresses.
 */
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste)
{
	unsigned long pgstev;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	int res = 0;

	WARN_ON_ONCE(orc > ESSA_MAX);
	if (unlikely(orc > ESSA_MAX))
		return -EINVAL;
	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	if (oldpte)
		*oldpte = pte_val(*ptep);
	if (oldpgste)
		*oldpgste = pgstev;

	switch (orc) {
	case ESSA_GET_STATE:
		break;
	case ESSA_SET_STABLE:
		pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		break;
	case ESSA_SET_UNUSED:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_POT_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
			break;
		}
		if (pgstev & _PGSTE_GPS_ZERO) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			break;
		}
		if (!(pgstev & PGSTE_GC_BIT)) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			res = 1;
			break;
		}
		break;
	case ESSA_SET_STABLE_RESIDENT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		/*
		 * Since the resident state can go away any time after this
		 * call, we will not make this page resident. We can revisit
		 * this decision if a guest will ever start using this.
		 */
		break;
	case ESSA_SET_STABLE_IF_RESIDENT:
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
			pgstev |= _PGSTE_GPS_USAGE_STABLE;
		}
		break;
	case ESSA_SET_STABLE_NODAT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
		break;
	default:
		/* we should never get here! */
		break;
	}
	/* If we are discarding a page, set it to logical zero */
	if (res)
		pgstev |= _PGSTE_GPS_ZERO;

	pgste_val(pgste) = pgstev;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return res;
}
EXPORT_SYMBOL(pgste_perform_essa);
/**
 * set_pgste_bits - set specific PGSTE bits.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @bits: a bitmask representing the bits that will be touched
 * @value: the values of the bits to be written. Only the bits in the mask
 *	   will be written.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
		   unsigned long bits, unsigned long value)
{
	spinlock_t *ptl;
	pgste_t new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	new = pgste_get_lock(ptep);

	pgste_val(new) &= ~bits;
	pgste_val(new) |= value & bits;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_pgste_bits);
/**
 * get_pgste - get the current PGSTE for the given address.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @pgstep: will be written with the current PGSTE for the given address.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
{
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	*pgstep = pgste_val(pgste_get(ptep));
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_pgste);
#endif /* CONFIG_PGSTE */