// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
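/*
 * User page tables are charged to the allocating process
 * (GFP_KERNEL_ACCOUNT) and zeroed on allocation; kernel page tables are
 * allocated with __GFP_ACCOUNT cleared (see pte_alloc_one_kernel() below).
 */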
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}
static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);
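/*
 * For example, booting with "userpte=nohigh" on the kernel command line
 * keeps user page tables in lowmem even on CONFIG_HIGHPTE kernels.
 */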
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_table(tlb, pte);
}
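/*
 * Freeing goes through tlb_remove_table() rather than a direct free_page()
 * so the page cannot be reused while another CPU may still be walking it:
 * the actual free is deferred until after the TLB shootdown.
 */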
#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_table(tlb, page);
}
#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_table(tlb, virt_to_page(pud));
}
#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
	paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
	tlb_remove_table(tlb, virt_to_page(p4d));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
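/*
 * With a shared kernel pmd, only the user portion of the pgd (entries
 * below KERNEL_PGD_BOUNDARY) is private to each pgd; otherwise all
 * PTRS_PER_PGD entries are.
 */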
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}
struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}
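/*
 * The owning mm is stashed in the otherwise-unused page->index of the pgd
 * page, so code walking pgd_list can map a pgd page back to its mm; the
 * BUILD_BUG_ON above guarantees the pointer fits in that field.
 */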
static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS >= 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}
static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */
#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */
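/*
 * With PREALLOCATED_PMDS == 0, the preallocation loops below compile away
 * entirely on non-PAE configurations.
 */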
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}
static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;
	bool failed = false;
	gfp_t gfp = PGALLOC_GFP;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
			mm_dec_nr_pmds(mm);
		}
	}
}
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	p4d_t *p4d;
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}
/*
 * Xen paravirt assumes the pgd table should be in one page. The 64-bit
 * kernel also assumes that the pgd is in one page.
 *
 * But a kernel with PAE paging that is not running as a Xen domain
 * only needs to allocate 32 bytes for the pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32

static struct kmem_cache *pgd_cache;
static int __init pgd_cache_init(void)
{
	/*
	 * When a PAE kernel is running as a Xen domain, it does not use
	 * a shared kernel pmd. And this requires a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return 0;

	/*
	 * When a PAE kernel is not running as a Xen domain, it uses a
	 * shared kernel pmd. A shared kernel pmd does not require a whole
	 * page for the pgd: 32 bytes suffice. During boot we therefore
	 * create a 32-byte slab for pgd table allocation.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
	return 0;
}
core_initcall(pgd_cache_init);
static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * If no SHARED_KERNEL_PMD, the PAE kernel is running as a Xen
	 * domain. We allocate one page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_page(PGALLOC_GFP);

	/*
	 * Otherwise the PAE kernel is not running as a Xen domain and we
	 * can allocate from the 32-byte slab to save memory.
	 */
	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}
static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_page((unsigned long)pgd);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else

static inline pgd_t *_pgd_alloc(void)
{
	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
}

static inline void _pgd_free(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = _pgd_alloc();
	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(mm, pmds);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}
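/*
 * Lifetime sketch (illustrative; the usual callers are mm_alloc_pgd() and
 * mm_free_pgd() in kernel/fork.c):
 *
 *	mm->pgd = pgd_alloc(mm);
 *	...
 *	pgd_free(mm, mm->pgd);
 */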
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}
/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty)
		*ptep = entry;

	return changed;
}
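/*
 * Returning "changed" lets generic mm code decide whether MMU caches need
 * updating; no TLB flush is issued here, for the same spurious-fault
 * reason documented in pmdp_set_access_flags() below.
 */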
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pud_t *pudp, pud_t entry, int dirty)
{
	int changed = !pud_same(*pudp, entry);

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);

	if (changed && dirty) {
		*pudp = entry;
		/*
		 * We had a write-protection fault here and changed the pud
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	return ret;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	return ret;
}
int pudp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pud_t *pudp)
{
	int ret = 0;

	if (pud_young(*pudp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pudp);

	return ret;
}
#endif
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}
#endif
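/*
 * Unlike the pte variant above, the pmd variant does flush: presumably
 * the flush cost is amortized over a 2MB mapping and accurate THP aging
 * is considered worth it.
 */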
/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}
int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

#ifdef CONFIG_X86_64
	/*
	 * Ensure that the static initial page tables are covering the
	 * fixmap completely.
	 */
	BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
		     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
#endif

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}
void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
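/*
 * Generic code generally reaches these through the set_fixmap() helpers
 * in fixmap.h; under paravirt, __set_fixmap is an operation that native
 * kernels route straight to native_set_fixmap().
 */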
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifdef CONFIG_X86_5LEVEL
/**
 * p4d_set_huge - setup kernel P4D mapping
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

/**
 * p4d_clear_huge - clear kernel P4D mapping when it is set
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_clear_huge(p4d_t *p4d)
{
	return 0;
}
#endif
/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK))
		return 0;

	/* Bail out if we are on a populated non-leaf entry: */
	if (pud_present(*pud) && !pud_huge(*pud))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}
/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK)) {
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
			     __func__, addr, addr + PMD_SIZE);
		return 0;
	}

	/* Bail out if we are on a populated non-leaf entry: */
	if (pmd_present(*pmd) && !pmd_huge(*pmd))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}
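/*
 * These helpers back huge-page ioremap()/vmap(): lib/ioremap.c tries
 * pud_set_huge() first and falls back to pmd_set_huge(), then to 4K
 * mappings, when a larger mapping is refused.
 */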
/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}
/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}
#ifdef CONFIG_X86_64
/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 * @addr: Virtual address associated with pud.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 *
 * NOTE: Callers must allow a single page allocation.
 */
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd, *pmd_sv;
	pte_t *pte;
	int i;

	if (pud_none(*pud))
		return 1;

	pmd = (pmd_t *)pud_page_vaddr(*pud);
	pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
	if (!pmd_sv)
		return 0;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd_sv[i] = pmd[i];
		if (!pmd_none(pmd[i]))
			pmd_clear(&pmd[i]);
	}

	pud_clear(pud);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd_sv[i])) {
			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
			free_page((unsigned long)pte);
		}
	}

	free_page((unsigned long)pmd_sv);
	free_page((unsigned long)pmd);

	return 1;
}
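/*
 * The scratch page (pmd_sv) keeps a copy of the old pmd entries so the
 * underlying pte pages are freed only after the flush above, when no
 * paging-structure cache can still reference them.
 */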
/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 * @addr: Virtual address associated with pmd.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;

	if (pmd_none(*pmd))
		return 1;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	free_page((unsigned long)pte);

	return 1;
}
#else /* !CONFIG_X86_64 */

int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	return pud_none(*pud);
}

/*
 * Disable free page handling on x86-PAE. This assures that ioremap()
 * does not update sync'd pmd entries. See vmalloc_sync_one().
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	return pmd_none(*pmd);
}
#endif /* CONFIG_X86_64 */
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */