arch/s390/mm/pgtable.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright IBM Corp. 2007, 2011
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
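/*
 * This file implements the s390 page table update primitives: IPTE/IDTE
 * based TLB flushing for pte/pmd/pud entries, lazy-flush variants, and
 * the PGSTE (page status table entry) helpers used by KVM to manage
 * guest storage keys, dirty tracking and CMMA page states.
 */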
static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
	}
}
static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	}
}
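/*
 * Two flush strategies are used below: the "direct" variants invalidate
 * the entry and flush the TLB immediately, while the "lazy" variants
 * only mark the entry invalid and defer the hardware flush when the mm
 * is attached to no CPU other than the current one.
 */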
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep,
				      int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		ptep_ipte_local(mm, addr, ptep, nodat);
	else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}
static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep,
				    int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}
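/*
 * The PGSTE (page status table entry) for a pte lives in the same page
 * table page, PTRS_PER_PTE entries after the pte itself.  Bit 0x0080 in
 * the high halfword is the PCL (lock) bit: pgste_get_lock() takes it
 * with a compare-and-swap (csg) loop, and pgste_set_unlock() releases
 * it while storing the updated PGSTE value.
 */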
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}
static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}
static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
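/*
 * The shifts below move fields of the real storage key (ACC, FP, R, C)
 * into their PGSTE positions: the guest R/C bits sit 48 bit positions
 * above _PAGE_REFERENCED/_PAGE_CHANGED, the access-control key and the
 * fetch-protection bit sit 56 positions above _PAGE_ACC_BITS and
 * _PAGE_FP_BIT.
 */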
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}
static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}
static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}
static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep,
				     pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
	return old;
}
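/*
 * A pte exchange is a three step sequence: ptep_xchg_start() locks the
 * PGSTE and fires pending notifications, a flush helper invalidates the
 * old pte in the TLB, and ptep_xchg_commit() installs the new pte and
 * propagates storage key and usage state between the pte and the PGSTE.
 */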
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_direct(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);
pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);
pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);

void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (!MACHINE_HAS_NX)
		pte_val(pte) &= ~_PAGE_NOEXEC;
	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);
static inline void pmdp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
		gmap_pmdp_idte_local(mm, addr);
}

static inline void pmdp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST) {
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
	} else if (MACHINE_HAS_IDTE) {
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
	} else {
		__pmdp_csp(pmdp);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_csp(mm, addr);
	}
}
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pmdp_idte_local(mm, addr, pmdp);
	else
		pmdp_idte_global(mm, addr, pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}
static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			gmap_pmdp_invalidate(mm, addr);
	} else {
		pmdp_idte_global(mm, addr, pmdp);
	}
	atomic_dec(&mm->context.flush_count);
	return old;
}
static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);
	return pmd;
}
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);
static inline void pudp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
}

static inline void pudp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
	else
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * re-use _pmd_csp() here
		 */
		__pmdp_csp((pmd_t *) pudp);
}
static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pudp_idte_local(mm, addr, pudp);
	else
		pudp_idte_global(mm, addr, pudp);
	atomic_dec(&mm->context.flush_count);
	return old;
}
pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	*pudp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);
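/*
 * The deposit/withdraw helpers below keep pre-allocated pte pages for a
 * huge pmd on a list.  The pgtable page itself doubles as the list
 * node: its first two (unused) pte slots hold a struct list_head, which
 * is why pgtable_trans_huge_withdraw() resets them to _PAGE_INVALID
 * before handing the page back.
 */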
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
/*
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p, nodat;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		pgste = pgste_update_all(entry, pgste, mm);
		pte_val(entry) |= _PAGE_INVALID;
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		pte_val(entry) &= ~_PAGE_INVALID;
		pte_val(entry) |= _PAGE_PROTECT;
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}
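/*
 * Shadow ptes back the VSIE (nested virtualization) shadow gmap: a
 * shadow pte mirrors a source pte of the parent guest, with write
 * protection added when the nested mapping asks for it.  Setting
 * PGSTE_VSIE_BIT on the source pte makes any later change to it
 * trigger a notification so its shadows can be invalidated.
 */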
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
				(pte_val(pte) & _PAGE_PROTECT);
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}

void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;
	int nodat;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	ptep_flush_direct(mm, saddr, ptep, nodat);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}
static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key ACC and F, but set R/C */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
/*
 * Test and reset if a guest page is dirty
 */
bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	bool dirty;
	int nodat;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
		ptep_ipte_global(mm, addr, ptep, nodat);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
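/*
 * The storage key functions below walk the page table down to the pte
 * level.  A large (1M) pmd has no pte level and no PGSTEs, so for huge
 * mappings the real storage key of the page is read or set directly.
 */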
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul, paddr;
	spinlock_t *ptl;
	pgste_t old, new;
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_alloc_map(mm, addr);
	if (unlikely(!pmdp))
		return -EFAULT;

	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		spin_unlock(ptl);
		return -EFAULT;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		/*
		 * Huge pmds need quiescing operations, they are
		 * always mapped.
		 */
		page_set_storage_key(paddr, key, 1);
		spin_unlock(ptl);
		return 0;
	}
	spin_unlock(ptl);

	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long bits, skey;

		paddr = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(paddr);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(paddr, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
/*
 * Conditionally set a guest storage key (handling csske).
 * oldkey will be updated when either mr or mc is set and a pointer is given.
 *
 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 * storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc)
{
	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
	int rc;

	/* we can drop the pgste lock between getting and setting the key */
	if (mr | mc) {
		rc = get_guest_storage_key(current->mm, addr, &tmp);
		if (rc)
			return rc;
		if (oldkey)
			*oldkey = tmp;
		if (!mr)
			mask |= _PAGE_REFERENCED;
		if (!mc)
			mask |= _PAGE_CHANGED;
		if (!((tmp ^ key) & mask))
			return 0;
	}
	rc = set_guest_storage_key(current->mm, addr, key, nq);
	return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);
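/*
 * Usage sketch (hypothetical caller, for illustration only - the real
 * users live under arch/s390/kvm):
 *
 *	unsigned char oldkey;
 *	int rc = cond_set_guest_storage_key(current->mm, hva, key,
 *					    &oldkey, nq, mr, mc);
 *
 * Here mr/mc would be taken from the conditional-SSKE instruction bits;
 * rc == 0 means the key already matched and nothing was written,
 * rc == 1 means the key was updated, rc < 0 is an access error.
 */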
/*
 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	unsigned long paddr;
	pgste_t old, new;
	pmd_t *pmdp;
	pte_t *ptep;
	int cc = 0;

	pmdp = pmd_alloc_map(mm, addr);
	if (unlikely(!pmdp))
		return -EFAULT;

	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		spin_unlock(ptl);
		return -EFAULT;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		cc = page_reset_referenced(paddr);
		spin_unlock(ptl);
		return cc;
	}
	spin_unlock(ptl);

	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
	pgste_val(new) &= ~PGSTE_GR_BIT;

	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		paddr = pte_val(*ptep) & PAGE_MASK;
		cc = page_reset_referenced(paddr);
		/* Merge real referenced bit into host-set */
		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key)
{
	unsigned long paddr;
	spinlock_t *ptl;
	pgste_t pgste;
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_alloc_map(mm, addr);
	if (unlikely(!pmdp))
		return -EFAULT;

	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		/* Not yet mapped memory has a zero key */
		spin_unlock(ptl);
		*key = 0;
		return 0;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		*key = page_get_storage_key(paddr);
		spin_unlock(ptl);
		return 0;
	}
	spin_unlock(ptl);

	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	pgste = pgste_get_lock(ptep);
	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	paddr = pte_val(*ptep) & PAGE_MASK;
	if (!(pte_val(*ptep) & _PAGE_INVALID))
		*key = page_get_storage_key(paddr);
	/* Reflect guest's logical view, not physical */
	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);
/**
 * pgste_perform_essa - perform ESSA actions on the PGSTE.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @orc: the specific action to perform, see the ESSA_SET_* macros.
 * @oldpte: the PTE will be saved there if the pointer is not NULL.
 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
 *
 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
 *	   or < 0 in case of error. -EINVAL is returned for invalid values
 *	   of orc, -EFAULT for invalid addresses.
 */
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
			unsigned long *oldpte, unsigned long *oldpgste)
{
	unsigned long pgstev;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	int res = 0;

	WARN_ON_ONCE(orc > ESSA_MAX);
	if (unlikely(orc > ESSA_MAX))
		return -EINVAL;
	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	if (oldpte)
		*oldpte = pte_val(*ptep);
	if (oldpgste)
		*oldpgste = pgstev;

	switch (orc) {
	case ESSA_GET_STATE:
		break;
	case ESSA_SET_STABLE:
		pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		break;
	case ESSA_SET_UNUSED:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_POT_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
			break;
		}
		if (pgstev & _PGSTE_GPS_ZERO) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			break;
		}
		if (!(pgstev & PGSTE_GC_BIT)) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			res = 1;
			break;
		}
		break;
	case ESSA_SET_STABLE_RESIDENT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		/*
		 * Since the resident state can go away any time after this
		 * call, we will not make this page resident. We can revisit
		 * this decision if a guest will ever start using this.
		 */
		break;
	case ESSA_SET_STABLE_IF_RESIDENT:
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
			pgstev |= _PGSTE_GPS_USAGE_STABLE;
		}
		break;
	case ESSA_SET_STABLE_NODAT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
		break;
	default:
		/* we should never get here! */
		break;
	}
	/* If we are discarding a page, set it to logical zero */
	if (res)
		pgstev |= _PGSTE_GPS_ZERO;

	pgste_val(pgste) = pgstev;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return res;
}
EXPORT_SYMBOL(pgste_perform_essa);
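/*
 * ESSA is the instruction behind collaborative memory management
 * (CMMA): the guest announces per-page usage states (stable, unused,
 * volatile, ...) and the host may discard pages accordingly.  A result
 * of 1 from pgste_perform_essa() tells the caller (typically KVM's
 * ESSA intercept handler) to add the page to the CBRL.
 */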
/**
 * set_pgste_bits - set specific PGSTE bits.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @bits: a bitmask representing the bits that will be touched
 * @value: the values of the bits to be written. Only the bits in the mask
 *	   will be written.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
			unsigned long bits, unsigned long value)
{
	spinlock_t *ptl;
	pgste_t new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	new = pgste_get_lock(ptep);

	pgste_val(new) &= ~bits;
	pgste_val(new) |= value & bits;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_pgste_bits);
/**
 * get_pgste - get the current PGSTE for the given address.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @pgstep: will be written with the current PGSTE for the given address.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
{
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	*pgstep = pgste_val(pgste_get(ptep));
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_pgste);
#endif