/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

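/*
 * Usage sketch (illustrative only, not part of this header): a driver
 * mapping MMIO into userspace would typically apply pgprot_noncached()
 * to the vma's protection before remapping; "vma" and "pfn" here are
 * assumed to come from the caller's mmap() handler:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */
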
/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
void ptdump_walk_pgd_level_checkwx(void);
void ptdump_walk_user_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
#else
#define debug_checkwx()		do { } while (0)
#define debug_checkwx_user()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

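/*
 * Usage sketch (illustrative; assumes OSPKE is enabled): deny writes
 * through protection key 1 by setting its write-disable bit in PKRU.
 * PKRU_WD_BIT and PKRU_BITS_PER_PKEY are defined further down in this
 * header:
 *
 *	u32 pkru = read_pkru();
 *	pkru |= PKRU_WD_BIT << (1 * PKRU_BITS_PER_PKEY);
 *	write_pkru(pkru);
 */
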
static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

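/*
 * Illustrative round trip (assumes a present, non-PROT_NONE entry):
 * pte_pfn() masks off the protection bits and undoes the PROT_NONE
 * PFN inversion, so for a freshly built entry
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 * pte_pfn(pte) == pfn holds. pfn_pte() is defined further below.
 */
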
static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h can not be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx "
		  "bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}

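/*
 * Example (illustrative): on a CPU without NX, __supported_pte_mask
 * has _PAGE_NX clear, so
 *
 *	massage_pgprot(__pgprot(_PAGE_PRESENT | _PAGE_NX))
 *
 * yields plain _PAGE_PRESENT; check_pgprot() returns the same value
 * but also WARN_ONCE()s under CONFIG_DEBUG_VM because bits were lost.
 */
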
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		       __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pfn_pud(pud_pfn(pud),
		       __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}

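/*
 * Usage sketch (illustrative, roughly what mprotect() ends up doing):
 * change the protection of an existing entry while the bits in
 * _PAGE_CHG_MASK (PFN, PAT, accessed, dirty, ...) are preserved;
 * "newflags" is an assumed caller-supplied vm_flags value:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(newflags));
 */
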
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return canon_pgprot(prot);
}

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

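/*
 * Example (illustrative): a write-combining request whose resulting
 * type would be write-back is rejected,
 *
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WC,
 *			       _PAGE_CACHE_MODE_WB) == 0
 *
 * while a WB request resolving to UC- matches none of the pairs above
 * and is allowed.
 */
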
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user and returns the resulting PGD that must be set in
 * the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
#else   /* CONFIG_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif  /* CONFIG_PAGE_TABLE_ISOLATION */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table.. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 1;
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (!pgtable_l5_enabled())
		return 0;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 0;
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * 64-bit.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
/*
 * a shortcut to get a pgd_t in a given mm
 */
#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

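/*
 * Illustrative kernel-address walk using the accessors above (no
 * locking; assumes every level is present and the address is mapped
 * with a 4K page; error handling omitted):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 *	pud_t *pud = pud_offset(p4d, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */
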
#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pmdp, pmd);
	} else {
		pmd_t old = *pmdp;
		WRITE_ONCE(*pmdp, pmd);
		return old;
	}
}
#endif

/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}

static inline int pgd_large(pgd_t pgd) { return 0; }

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
 * the user one is in the last 4k.  To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

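/*
 * Worked example (addresses are illustrative): with a kernel PGD page
 * at 0xffff888000224000, setting bit PAGE_SHIFT (bit 12) yields the
 * user copy at 0xffff888000225000, and clearing it maps back again;
 * see kernel_to_user_pgdp()/user_to_kernel_pgdp() below.
 */
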
/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}
static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}

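/*
 * Usage sketch (illustrative): pgd-constructor-style code clones the
 * kernel half of swapper_pg_dir into a freshly allocated PGD page;
 * "new_pgd" is an assumed caller-owned pointer:
 *
 *	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */
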
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}

static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}

static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}

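/*
 * Worked example on x86-64 (4 KiB base pages, PTE_SHIFT == 9): for
 * PG_LEVEL_2M == 2, page_level_shift() gives (12 - 9) + 2 * 9 = 21,
 * so page_level_size() is 2 MiB and page_level_mask() is ~(2 MiB - 1).
 */
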
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */