#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
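/*
 * Example (illustrative, not part of this header): a driver mapping MMIO
 * into userspace would typically strip caching before remapping:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       size, vma->vm_page_prot);
 */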
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()	ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx()	do { } while (0)
#endif
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}
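/*
 * Example (illustrative): callers are expected to check presence first,
 * e.g. when scanning for pages that need writing back:
 *
 *	pte_t pte = *ptep;
 *	if (pte_present(pte) && pte_dirty(pte))
 *		writeback_page();	(writeback_page() is hypothetical)
 */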
static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}
static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
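/*
 * Example (illustrative): recovering the physical address behind a
 * present pte for a given virtual address:
 *
 *	phys_addr_t phys = ((phys_addr_t)pte_pfn(pte) << PAGE_SHIFT) |
 *			   (address & ~PAGE_MASK);
 */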
static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
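/*
 * Note: the pte_mk*()/pte_clr*() helpers above operate on a pte value and
 * return a new value; nothing touches the page tables until the result is
 * written back, e.g. (illustrative):
 *
 *	set_pte_at(mm, addr, ptep, pte_mkyoung(pte_mkdirty(*ptep)));
 */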
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
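/*
 * Example (illustrative): on a CPU without NX support, __supported_pte_mask
 * has _PAGE_NX cleared, so for a present pgprot:
 *
 *	massage_pgprot(__pgprot(_PAGE_PRESENT | _PAGE_NX)) == _PAGE_PRESENT
 *
 * while a non-present pgprot passes through unchanged.
 */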
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}
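/*
 * Example (illustrative): mprotect()-style permission changes go through
 * pte_modify(); the PFN and the dirty/accessed state survive while the
 * protection bits are replaced:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(newflags));
 */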
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))
static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
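/*
 * Example (illustrative, for a tracked range): a WT request may be
 * satisfied by the stricter UC-, but not by the weaker WB:
 *
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WT,
 *			       _PAGE_CACHE_MODE_UC_MINUS) == 1
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WT,
 *			       _PAGE_CACHE_MODE_WB) == 0
 */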
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif
#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}
#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */
static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}
/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)		\
	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
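/*
 * Worked example (x86_64, where PMD_SHIFT == 21 and PTRS_PER_PMD == 512):
 * bits 29..21 of the address select the pmd entry, so
 *
 *	pmd_index(0xffff880012345678UL) == (0x12345678 >> 21) & 511 == 0x91
 */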
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		\
	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
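/*
 * Example (illustrative sketch, not a kernel API): a full software walk
 * for a kernel virtual address, assuming every level is present and
 * nothing is mapped huge (real callers must check pXd_none()/pXd_bad()
 * and pud_large()/pmd_large() at each step):
 *
 *	pgd_t *pgd = pgd_offset_k(address);
 *	pud_t *pud = pud_offset(pgd, address);
 *	pmd_t *pmd = pmd_offset(pud, address);
 *	pte_t *pte = pte_offset_kernel(pmd, address);
 */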
#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}
#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)	do { } while (0)
#endif
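/*
 * Example (illustrative): ptep_set_wrprotect() below follows this rule;
 * it flips the bit by hand and then notifies via pte_update(), leaving
 * the TLB flush to the caller:
 *
 *	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 *	pte_update(mm, addr, ptep);
 */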
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;
#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
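/*
 * Example (illustrative): this is the usual way a freshly allocated pgd
 * inherits the kernel half of the reference page tables:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */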
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
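/*
 * Worked example (x86_64, PTE_SHIFT == ilog2(512) == 9):
 *
 *	page_level_shift(PG_LEVEL_4K) == (12 - 9) + 1 * 9 == 12
 *	page_level_shift(PG_LEVEL_2M) == (12 - 9) + 2 * 9 == 21
 *	page_level_shift(PG_LEVEL_1G) == (12 - 9) + 3 * 9 == 30
 *
 * so page_level_size() yields 4KiB, 2MiB and 1GiB respectively.
 */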
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}
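/*
 * Worked example: each pkey owns two adjacent PKRU bits, AD (low) then
 * WD (high). For pkey 1 those are bits 2 and 3, so with pkru == 0x8
 * (only WD for pkey 1 set):
 *
 *	__pkru_allows_read(0x8, 1)  == true	(AD bit 2 is clear)
 *	__pkru_allows_write(0x8, 1) == false	(WD bit 3 is set)
 */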
static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}
#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */