#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/pgtable_types.h>
/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
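/*
 * Usage sketch (illustrative, not part of this header): a driver would
 * typically apply pgprot_noncached() before remapping MMIO into userspace:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */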
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
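/*
 * Usage sketch (illustrative): a read fault on anonymous memory can map
 * the shared zero page rather than allocating a fresh one, roughly:
 *
 *	pte_t entry = pte_mkspecial(pfn_pte(page_to_pfn(ZERO_PAGE(addr)),
 *					    vma->vm_page_prot));
 */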
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */
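/*
 * Illustrative note: with CONFIG_PARAVIRT disabled, the macros above compile
 * straight to their native_* counterparts, so e.g.
 *
 *	set_pte(ptep, pfn_pte(pfn, PAGE_KERNEL));
 *
 * writes the entry directly, with no hypervisor hook in between.
 */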
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}
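/*
 * Usage sketch (illustrative): because the accessors above are only defined
 * for present ptes, callers guard them with pte_present(), e.g.
 *
 *	pte_t pte = *ptep;
 *	if (pte_present(pte) && pte_dirty(pte))
 *		writeback_page(page);	(writeback_page() is hypothetical)
 */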
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}
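/*
 * Worked example (illustrative, 4 KiB pages, PAGE_SHIFT == 12): a pte value
 * of 0x12345f067 splits into pfn 0x12345f and the low flag bits 0x067
 * (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY).
 */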
#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_PSE;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PSE;
}
static inline int has_transparent_hugepage(void)
{
	return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}
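/*
 * Usage sketch (illustrative): each helper above returns a new pte_t by
 * value, so they compose freely, e.g. a COW write-fault path might do:
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 */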
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
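/*
 * Illustrative example: on a CPU without NX, __supported_pte_mask has
 * _PAGE_NX clear, so massage_pgprot() silently drops the NX request from a
 * present pgprot rather than creating an entry the hardware would reject.
 */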
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}
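/*
 * Usage sketch (illustrative): mprotect()-style permission changes funnel
 * through pte_modify(), which keeps the pfn plus dirty/accessed state and
 * swaps only the protection bits:
 *
 *	pte = pte_modify(pte, vma->vm_page_prot);
 */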
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))
static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
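/*
 * Usage sketch (illustrative): PAT code would consult this check when a new
 * mapping of already-tracked memory asks for a different cache mode:
 *
 *	if (!is_new_memtype_allowed(paddr, size, want_pcm, got_pcm))
 *		return -EINVAL;
 */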
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
	    mm_tlb_flush_pending(mm))
		return true;

	return false;
}
static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}
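/*
 * Illustrative example: while split_huge_page() is running, an entry can
 * read as _PAGE_PSE set with _PAGE_PRESENT clear; pmd_present() still
 * returns true for it, which is exactly why _PAGE_PSE is in the mask.
 */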
#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */
static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}
/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)		\
	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
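/*
 * Worked example (illustrative, x86-64 defaults: PMD_SHIFT == 21,
 * PTRS_PER_PMD == 512): for address 0x7f0000201000,
 * (address >> 21) & 511 == 1, i.e. the second entry of its pmd page.
 */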
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
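/*
 * Usage sketch (illustrative): a software walk of a kernel address composes
 * the *_offset helpers in this file top-down:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */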
static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
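/*
 * Worked example (illustrative, 4 KiB pages): pages_to_mb(262144) ==
 * 262144 >> 8 == 1024, i.e. 262144 pages == 1 GiB == 1024 MB.
 */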
#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		\
	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */
/*
 * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}
#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#endif
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
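/*
 * Usage sketch (illustrative): pgd_alloc()-style code clones the kernel
 * half of a reference page directory into a freshly allocated one:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */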
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
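/*
 * Worked example (illustrative): with 512 entries per level PTE_SHIFT == 9,
 * so for PG_LEVEL_2M (== 2) page_level_shift() is (12 - 9) + 2 * 9 == 21,
 * and page_level_size() is therefore 2 MiB.
 */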
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */