/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte(addr, ptep, IPTE_LOCAL);
	else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		/* Only the local CPU is attached: defer the TLB flush */
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
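
/*
 * An illustrative sketch of how the pgste accessors above compose; the
 * helper name is hypothetical.  Take the PCL lock, modify the pgste value,
 * then store it back with the lock bit cleared - this is the pattern the
 * CONFIG_PGSTE code below follows throughout.
 */
static inline void pgste_example_set_bits(pte_t *ptep, unsigned long bits)
{
	pgste_t pgste;

	pgste = pgste_get_lock(ptep);	/* take the PCL lock */
	pgste_val(pgste) |= bits;	/* modify the shadowed pgste value */
	pgste_set_unlock(ptep, pgste);	/* store back and drop the lock */
}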

static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}

static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}

static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline void ptep_xchg_commit(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep,
				    pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
}

pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_direct(mm, addr, ptep);
	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);
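
/*
 * An illustrative sketch of how the xchg primitives above are used by a
 * caller that wants to clear a pte and inspect the old value; the helper
 * name is hypothetical.  This is essentially how ptep_get_and_clear() is
 * built on top of ptep_xchg_lazy() in asm/pgtable.h: the lazy variant may
 * defer the TLB flush when only the local CPU has the mm attached, while
 * ptep_xchg_direct() always flushes before returning.
 */
static inline pte_t ptep_example_get_and_clear(struct mm_struct *mm,
					       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}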

pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);

void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);
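
/*
 * An illustrative sketch of the intended start/commit pairing; the helper
 * name and the write-protect use case are hypothetical.  The pte is
 * invalidated and fetched with ptep_modify_prot_start(), the caller edits
 * the software copy, and ptep_modify_prot_commit() makes the new value
 * visible again (it also re-enables preemption, which the start function
 * disabled).
 */
static inline void ptep_example_wrprotect(struct mm_struct *mm,
					  unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = ptep_modify_prot_start(mm, addr, ptep);
	pte = pte_wrprotect(pte);	/* modify the invalidated copy */
	ptep_modify_prot_commit(mm, addr, ptep, pte);
}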

static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte(addr, pmdp, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, IDTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(addr, pmdp, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);

static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * re-use _pmd_csp() here
		 */
		__pmdp_csp((pmd_t *) pudp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pudp_idte(addr, pudp, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, IDTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	*pudp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
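
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * An illustrative sketch of the deposit/withdraw lifecycle above, assuming
 * the caller holds the pmd lock that assert_spin_locked() checks for.  The
 * helper name is hypothetical: it parks a preallocated page table behind a
 * huge pmd and immediately takes it back, the same pairing a collapse and a
 * later split perform over a longer time span.
 */
static inline pgtable_t thp_example_deposit_withdraw(struct mm_struct *mm,
						     pmd_t *pmdp,
						     pgtable_t pgtable)
{
	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
	return pgtable_trans_huge_withdraw(mm, pmdp);
}
#endif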

void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep);
		pgste = pgste_update_all(entry, pgste, mm);
		pte_val(entry) |= _PAGE_INVALID;
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep);
		pte_val(entry) &= ~_PAGE_INVALID;
		pte_val(entry) |= _PAGE_PROTECT;
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}
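
/*
 * An illustrative sketch of a ptep_force_prot() caller; the helper name is
 * hypothetical.  A user such as the gmap notification code makes a guest
 * mapping read-only and asks for an invalidation notification by passing
 * PGSTE_IN_BIT; -EAGAIN tells the caller to fault in the page and retry.
 */
static inline int ptep_example_protect_notify(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	return ptep_force_prot(mm, addr, ptep, PROT_READ, PGSTE_IN_BIT);
}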

int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
				(pte_val(pte) & _PAGE_PROTECT);
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}

void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	ptep_flush_direct(mm, saddr, ptep);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}

static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
			      PGSTE_GR_BIT | PGSTE_GC_BIT);
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/*
 * Test and reset if a guest page is dirty
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	pte_t pte;
	bool dirty;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return false;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);

	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);
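
/*
 * An illustrative sketch of how test_and_clear_guest_dirty() can be used
 * to harvest dirty information over a range of guest-mapped addresses,
 * e.g. for dirty logging; the helper name and the page-by-page scan are
 * hypothetical, not an interface provided by this file.
 */
static inline unsigned long example_count_guest_dirty(struct mm_struct *mm,
						      unsigned long start,
						      unsigned long end)
{
	unsigned long addr, dirty = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE)
		if (test_and_clear_guest_dirty(mm, addr))
			dirty++;
	return dirty;
}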

int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul;
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

/*
 * Conditionally set a guest storage key (handling csske).
 * oldkey will be updated when either mr or mc is set and a pointer is given.
 *
 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 * storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc)
{
	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
	int rc;

	/* we can drop the pgste lock between getting and setting the key */
	if (mr | mc) {
		rc = get_guest_storage_key(current->mm, addr, &tmp);
		if (rc)
			return rc;
		if (oldkey)
			*oldkey = tmp;
		if (!mr)
			mask |= _PAGE_REFERENCED;
		if (!mc)
			mask |= _PAGE_CHANGED;
		if (!((tmp ^ key) & mask))
			return 0;
	}
	rc = set_guest_storage_key(current->mm, addr, key, nq);
	return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);

/*
 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;
	int cc = 0;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
	pgste_val(new) &= ~PGSTE_GR_BIT;

	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
		/* Merge real referenced bit into host-set */
		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);

int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	pgste = pgste_get_lock(ptep);
	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID))
		*key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
	/* Reflect guest's logical view, not physical */
	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);
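
/*
 * An illustrative sketch of a storage-key round trip with the interfaces
 * above; the helper name is hypothetical.  KVM uses these functions to
 * emulate the guest storage-key instructions (iske, sske, rrbe): here the
 * guest view of the key is read and then written back unchanged.
 */
static inline int example_copy_guest_storage_key(struct mm_struct *mm,
						 unsigned long addr)
{
	unsigned char key;
	int rc;

	rc = get_guest_storage_key(mm, addr, &key);
	if (rc)
		return rc;
	return set_guest_storage_key(mm, addr, key, false);
}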