#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/pgtable_types.h>
/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
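/*
 * Illustrative usage (not part of the original header): a driver
 * typically applies this to vma->vm_page_prot before mapping MMIO,
 * e.g. in an mmap handler prior to remap_pfn_range():
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 */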
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)
#define set_pud_at(mm, addr, pudp, pud)	native_set_pud_at(mm, addr, pudp, pud)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)
#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)	do { } while (0)
#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}
static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}
static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}
static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
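/*
 * Worked example (illustrative): a PFN is the physical address divided
 * by PAGE_SIZE, so with 4 KiB pages a PTE mapping physical address
 * 0x1234000 has pte_pfn() == 0x1234, and pte_page() resolves that to
 * the corresponding struct page via pfn_to_page().
 */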
static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
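/*
 * Illustrative note (added for clarity): pmd_trans_huge() masks both
 * _PAGE_PSE and _PAGE_DEVMAP but compares against _PAGE_PSE alone, so a
 * huge entry backed by device memory (PSE|DEVMAP set) is deliberately
 * reported as not transparent-huge, while a plain THP entry (PSE only)
 * is.  pud_trans_huge() follows the same pattern one level up.
 */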
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}
static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
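/*
 * Illustrative sketch (not part of the original header): the pte_mk*()
 * helpers are pure value transformations and compose freely, e.g.
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 *
 * Nothing reaches the page tables until the result is installed with
 * set_pte_at() or a similar setter.
 */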
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}
static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}
static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}
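/*
 * Illustrative sketch (not part of the original header): pfn_pte() is
 * the primitive behind mk_pte() below; building an entry for page
 * frame 0x1234 with kernel protections would look like
 *
 *	pte_t pte = pfn_pte(0x1234, PAGE_KERNEL);
 *
 * with massage_pgprot() stripping any protection bits the CPU does not
 * support (e.g. NX on hardware without XD).
 */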
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}
#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))
static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */
#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>
static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}
static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif
#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}
static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}
#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */
static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}
/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)		\
	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)
/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
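/*
 * Worked example (illustrative, x86-64: PMD_SHIFT == 21, PTRS_PER_PMD
 * == 512): each pmd entry covers 2 MiB, so an address 5 MiB into a
 * 1 GiB-aligned region yields pmd_index() == 2; bits 29..21 of the
 * address select one of the 512 slots.
 */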
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
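/*
 * Worked example (illustrative): with 4 KiB pages (PAGE_SHIFT == 12)
 * this is npg >> 8, i.e. 256 pages per megabyte, so
 * pages_to_mb(2560) == 10.
 */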
#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		\
	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)		\
	pfn_to_page((p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT)

/* Find an entry in the third-level page table.. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */
static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}
#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */
/*
 * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
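/*
 * Illustrative sketch (not part of the original header): the offset
 * helpers compose into a full software page-table walk.  Assuming a
 * valid kernel mapping at 'addr' (error checking omitted):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 *	pud_t *pud = pud_offset(p4d, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * A real walker must check the *_none()/*_bad() and *_large() helpers
 * at each level before descending.
 */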
#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);
#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void native_set_pud_at(struct mm_struct *mm, unsigned long addr,
				     pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}
#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#endif
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
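/*
 * Illustrative usage (not part of the original header): pgd allocation
 * code uses this to copy the kernel half of a new pgd from the
 * reference page tables, roughly:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */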
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
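/*
 * Worked example (illustrative, x86-64: PAGE_SHIFT == 12, PTE_SHIFT ==
 * 9): page_level_shift() evaluates to 3 + 9 * level, so PG_LEVEL_4K
 * (1) -> 12, PG_LEVEL_2M (2) -> 21 and PG_LEVEL_1G (3) -> 30, giving
 * page_level_size() of 4 KiB, 2 MiB and 1 GiB respectively.
 */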
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}
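/*
 * Worked example (illustrative): each pkey owns two adjacent PKRU
 * bits.  For pkey 2 they sit at bit 4 (AD) and bit 5 (WD), so
 * pkru == 0x10 denies both reads and writes for pkey 2, while
 * pkru == 0x20 still allows reads but denies writes.
 */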
static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}
static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}
/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}
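/*
 * Illustrative note (added for clarity): generic code such as the fast
 * get_user_pages() path consults these hooks, e.g.
 *
 *	if (!pte_access_permitted(pte, write))
 *		... fall back to the slow path ...
 *
 * so a pkey denial forces the fault path, where the full check runs.
 */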
#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */