/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
void ptdump_walk_pgd_level_checkwx(void);
void ptdump_walk_user_pgd_level_checkwx(void);
#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
#else
#define debug_checkwx()		do { } while (0)
#define debug_checkwx_user()	do { } while (0)
#endif
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT_XXL */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)
#endif	/* CONFIG_PARAVIRT_XXL */
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}
static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}
static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}
/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
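
/*
 * Editor's note (illustrative, not upstream text): the XOR with
 * protnone_mask() above exists because, as an L1TF mitigation, x86
 * stores PROT_NONE entries with their physical-address bits inverted.
 * protnone_mask() is ~0ull for an entry that needs inversion and 0
 * otherwise, so a sketch of the recovery is:
 *
 *	u64 val = pte_val(pte);
 *	val ^= protnone_mask(val);	// undoes the inversion, if any
 *	pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 */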
static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}
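
/*
 * Editor's note (illustrative usage, not upstream text): the pte_mk*()
 * helpers below are thin wrappers around pte_set_flags() and
 * pte_clear_flags(), and are typically chained before installing an
 * entry, e.g.:
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte));
 *	set_pte_at(mm, addr, ptep, pte);
 */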
static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}
static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h can not be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx "
		  "bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}
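
/*
 * Editor's example (commentary only): on a CPU without NX,
 * __supported_pte_mask has _PAGE_NX clear, so for a present mapping
 *
 *	massage_pgprot(__pgprot(_PAGE_PRESENT | _PAGE_NX))
 *
 * silently drops _PAGE_NX; check_pgprot() returns the same value but
 * additionally warns under CONFIG_DEBUG_VM that bits were dropped.
 */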
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}
static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		       __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pfn_pud(pud_pfn(pud),
		       __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}
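
/*
 * Editor's note (illustrative usage, not upstream text): pte_modify()
 * is what lets mprotect() change protections while preserving the PFN
 * and the accessed/dirty state, roughly:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 *
 * _PAGE_CHG_MASK names the bits that must survive the change.
 */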
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))
static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return canon_pgprot(prot);
}
static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
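
/*
 * Editor's example (commentary only): a caller that mapped a range as
 * write-combining and later asks for write-back is refused:
 *
 *	is_new_memtype_allowed(paddr, size,
 *			       _PAGE_CACHE_MODE_WC,
 *			       _PAGE_CACHE_MODE_WB);	-> returns 0
 */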
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user and returns the resulting PGD that must be set in
 * the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
#else   /* CONFIG_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif  /* CONFIG_PAGE_TABLE_ISOLATION */
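
/*
 * Editor's note (commentary only): with PTI, every write of a kernel
 * PGD entry that must also appear in the user copy is funnelled
 * through pti_set_user_pgtbl(); e.g. the native set_pgd() path boils
 * down to something like:
 *
 *	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 */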
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>
static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif
#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}
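
/*
 * Editor's note (commentary only): this is why a huge page in the
 * middle of a split still reports pmd_present() == 1 even though the
 * hardware would fault on it; compare pmd_mknotpresent() above, which
 * clears _PAGE_PRESENT and _PAGE_PROTNONE but deliberately leaves
 * _PAGE_PSE alone.
 */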
#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */
static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table.. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */
static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}
#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 1;
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (!pgtable_l5_enabled())
		return 0;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 0;
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
/*
 * a shortcut to get a pgd_t in a given mm
 */
#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
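
/*
 * Editor's walk-through (illustrative, not upstream text): resolving a
 * kernel virtual address by hand chains the per-level helpers:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 *	pud_t *pud = pud_offset(p4d, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * with the none/bad checks and the large (huge-page) cases handled at
 * each level before descending.
 */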
#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);
#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}

# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}
#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pmdp, pmd);
	} else {
		pmd_t old = *pmdp;
		WRITE_ONCE(*pmdp, pmd);
		return old;
	}
}
#endif
/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}
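
/*
 * Editor's example (commentary only): with 4 KiB pages and 8-byte
 * pgd_t entries a table holds 512 slots.  When PTI splits the table in
 * half, PGD_KERNEL_START is 256, so slot 0 (offset 0x000) reports true
 * (userspace) and slot 511 (offset 0xff8) reports false (kernel).
 */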
static inline int pgd_large(pgd_t pgd) { return 0; }
#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
 * the user one is in the last 4k.  To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}
static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}
static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}
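
/*
 * Editor's note (commentary only): the PGD constructor path uses this
 * helper to seed a new mm's kernel mappings from the reference page
 * tables; with PTI enabled the same entries are also copied into the
 * user half of the order-1 PGD page, via kernel_to_user_pgdp().
 */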
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
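
/*
 * Editor's worked example (commentary only): on x86-64, PAGE_SHIFT is
 * 12 and PTE_SHIFT is ilog2(512) = 9, so with PG_LEVEL_4K = 1,
 * PG_LEVEL_2M = 2 and PG_LEVEL_1G = 3:
 *
 *	page_level_shift(PG_LEVEL_4K) = (12 - 9) + 1 * 9 = 12
 *	page_level_shift(PG_LEVEL_2M) = (12 - 9) + 2 * 9 = 21
 *	page_level_shift(PG_LEVEL_1G) = (12 - 9) + 3 * 9 = 30
 *
 * i.e. 4 KiB, 2 MiB and 1 GiB mapping sizes respectively.
 */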
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif
#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}
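
/*
 * Editor's worked example (commentary only): PKRU holds two bits per
 * key, so pkey 1 occupies bits 2-3.  For pkru == 0x8 (only WD of
 * pkey 1 set):
 *
 *	__pkru_allows_read(0x8, 1)	-> true  (AD bit 0x4 clear)
 *	__pkru_allows_write(0x8, 1)	-> false (WD bit 0x8 set)
 */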
static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}
static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}
/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}
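
/*
 * Editor's note (commentary only): this is the fast-path permission
 * check used by get_user_pages_fast()-style walkers; e.g. a write to a
 * read-only user PTE fails the _PAGE_RW test above before the pkey
 * check is even consulted.
 */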
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}
#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */