/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>

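/*
 * The ptep_ipte_*() helpers invalidate a page table entry together with its
 * TLB entries via the IPTE instruction. The _local variant only purges the
 * TLB of the current CPU (IPTE_LOCAL), the _global variant broadcasts the
 * invalidation to all CPUs (IPTE_GLOBAL). With the TLB guest facility the
 * IPTE options can additionally pass a guest ASCE and request a no-DAT
 * invalidation, as set up from the mm context below.
 */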
static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
	}
}

static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	}
}

static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep,
				      int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		ptep_ipte_local(mm, addr, ptep, nodat);
	else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep,
				    int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}

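/*
 * Each PGSTE (page table entry software extension) lives PTRS_PER_PTE
 * entries behind its pte, i.e. in the second half of the page table
 * allocated for address spaces with PGSTEs. pgste_get_lock() takes the
 * per-entry lock by spinning on the PCL bit with compare-and-swap;
 * pgste_set_unlock() stores the (modified) PGSTE back and clears the
 * PCL bit again.
 */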
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}

static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}

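/*
 * A pte exchange is split into three steps: ptep_xchg_start() locks the
 * PGSTE and delivers pending notification bits, ptep_flush_*() invalidates
 * the old pte and its TLB entries, and ptep_xchg_commit() transfers storage
 * key and usage state information between pte and PGSTE, installs the new
 * pte and drops the PGSTE lock. ptep_xchg_direct() and ptep_xchg_lazy()
 * below simply chain these three steps.
 */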
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep,
				     pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
	return old;
}

pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_direct(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);

pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);

void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (!MACHINE_HAS_NX)
		pte_val(pte) &= ~_PAGE_NOEXEC;
	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);

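/*
 * The same local/global invalidation pattern is repeated below for segment
 * (pmd) and region third (pud) table entries. IDTE is used where available;
 * if a machine lacks IDTE the entry is invalidated with CSP instead.
 */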
static inline void pmdp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
}

static inline void pmdp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
}

static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pmdp_idte_local(mm, addr, pmdp);
	else
		pmdp_idte_global(mm, addr, pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else
		pmdp_idte_global(mm, addr, pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);

static inline void pudp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
}

static inline void pudp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
	else
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * re-use _pmd_csp() here
		 */
		__pmdp_csp((pmd_t *) pudp);
}

static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pudp_idte_local(mm, addr, pudp);
	else
		pudp_idte_global(mm, addr, pudp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	*pudp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);

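/*
 * For transparent huge pages the page table that previously mapped the
 * segment is not freed but deposited in a list hanging off the pmd, so it
 * can be handed back without a new allocation when the huge mapping is
 * split again. The withdraw side pops the oldest entry (FIFO) and resets
 * its first two ptes to _PAGE_INVALID before returning the table.
 */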
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

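/*
 * The remaining functions operate on the PGSTE part of the page tables and
 * are only relevant for address spaces that run KVM guests: storage key
 * emulation, dirty tracking for migration, pte notification, VSIE shadow
 * page tables and the CMM/ESSA page usage states.
 */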
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p, nodat;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		pgste = pgste_update_all(entry, pgste, mm);
		pte_val(entry) |= _PAGE_INVALID;
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		pte_val(entry) &= ~_PAGE_INVALID;
		pte_val(entry) |= _PAGE_PROTECT;
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}

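/*
 * ptep_shadow_pte() builds a shadow (VSIE) pte: the target pte inherits the
 * page frame of the source pte and stays at least as restrictive as both
 * the source pte and the requested access. The source PGSTE is marked with
 * PGSTE_VSIE_BIT so that a later change of the source pte can notify and
 * invalidate the shadow again.
 */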
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
				(pte_val(pte) & _PAGE_PROTECT);
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}

void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;
	int nodat;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	ptep_flush_direct(mm, saddr, ptep, nodat);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}

static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key ACC and F, but set R/C */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

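/*
 * Software dirty tracking for guests works via PGSTE_UC_BIT: pgste_set_pte()
 * sets the bit whenever a writable pte is installed, and
 * test_and_clear_guest_dirty() below reports and clears it. After clearing,
 * the pte is write-protected (or invalidated on machines without ESOP) so
 * that the next store faults and sets the bit again.
 */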
/*
 * Test and reset if a guest page is dirty
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgste_t pgste;
	pte_t *ptep;
	pte_t pte;
	bool dirty;
	int nodat;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return false;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return false;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return false;
	/* We can't run guests backed by huge pages, but userspace can
	 * still set them up and then try to migrate them without any
	 * migration support.
	 */
	if (pmd_large(*pmd))
		return true;

	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (unlikely(!ptep))
		return false;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
		ptep_ipte_global(mm, addr, ptep, nodat);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);

	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);

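/*
 * Guest storage key handling: while a page is not mapped, the architected
 * key byte (access-control bits, fetch protection, referenced and changed)
 * is kept in the PGSTE. As the masks below show, the ACC/F part is stored
 * shifted left by 56, the guest R/C bits shifted by 48, and the host view
 * of R/C is merged in at the << 52 position. When the pte is valid, the
 * real storage key of the backing page is read and updated as well.
 */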
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul;
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

/*
 * Conditionally set a guest storage key (handling csske).
 * oldkey will be updated when either mr or mc is set and a pointer is given.
 *
 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 * storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc)
{
	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
	int rc;

	/* we can drop the pgste lock between getting and setting the key */
	if (mr | mc) {
		rc = get_guest_storage_key(current->mm, addr, &tmp);
		if (rc)
			return rc;
		if (oldkey)
			*oldkey = tmp;
		if (!mr)
			mask |= _PAGE_REFERENCED;
		if (!mc)
			mask |= _PAGE_CHANGED;
		if (!((tmp ^ key) & mask))
			return 0;
	}
	rc = set_guest_storage_key(current->mm, addr, key, nq);
	return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);

/*
 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;
	int cc = 0;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
	pgste_val(new) &= ~PGSTE_GR_BIT;

	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
		/* Merge real referenced bit into host-set */
		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);

int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	pgste = pgste_get_lock(ptep);
	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID))
		*key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
	/* Reflect guest's logical view, not physical */
	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);

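/*
 * ESSA is the instruction a guest uses for collaborative memory management:
 * it announces page usage states (stable, unused, volatile, potentially
 * volatile) so the host can preferentially discard or swap such pages.
 * The states are tracked in the _PGSTE_GPS_* bits of the PGSTE.
 */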
/**
 * pgste_perform_essa - perform ESSA actions on the PGSTE.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @orc: the specific action to perform, see the ESSA_SET_* macros.
 * @oldpte: the PTE will be saved there if the pointer is not NULL.
 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
 *
 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
 *	   or < 0 in case of error. -EINVAL is returned for invalid values
 *	   of orc, -EFAULT for invalid addresses.
 */
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
			unsigned long *oldpte, unsigned long *oldpgste)
{
	unsigned long pgstev;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	int res = 0;

	WARN_ON_ONCE(orc > ESSA_MAX);
	if (unlikely(orc > ESSA_MAX))
		return -EINVAL;
	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	if (oldpte)
		*oldpte = pte_val(*ptep);
	if (oldpgste)
		*oldpgste = pgstev;

	switch (orc) {
	case ESSA_GET_STATE:
		break;
	case ESSA_SET_STABLE:
		pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		break;
	case ESSA_SET_UNUSED:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_POT_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
			break;
		}
		if (pgstev & _PGSTE_GPS_ZERO) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			break;
		}
		if (!(pgstev & PGSTE_GC_BIT)) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			res = 1;
			break;
		}
		break;
	case ESSA_SET_STABLE_RESIDENT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		/*
		 * Since the resident state can go away any time after this
		 * call, we will not make this page resident. We can revisit
		 * this decision if a guest will ever start using this.
		 */
		break;
	case ESSA_SET_STABLE_IF_RESIDENT:
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
			pgstev |= _PGSTE_GPS_USAGE_STABLE;
		}
		break;
	case ESSA_SET_STABLE_NODAT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
		break;
	default:
		/* we should never get here! */
		break;
	}
	/* If we are discarding a page, set it to logical zero */
	if (res)
		pgstev |= _PGSTE_GPS_ZERO;

	pgste_val(pgste) = pgstev;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return res;
}
EXPORT_SYMBOL(pgste_perform_essa);

/**
 * set_pgste_bits - set specific PGSTE bits.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @bits: a bitmask representing the bits that will be touched
 * @value: the values of the bits to be written. Only the bits in the mask
 *	   will be written.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
		   unsigned long bits, unsigned long value)
{
	spinlock_t *ptl;
	pgste_t new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	new = pgste_get_lock(ptep);

	pgste_val(new) &= ~bits;
	pgste_val(new) |= value & bits;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_pgste_bits);

/**
 * get_pgste - get the current PGSTE for the given address.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @pgstep: will be written with the current PGSTE for the given address.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
{
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	*pgstep = pgste_val(pgste_get(ptep));
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_pgste);