/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

bool hugetlb_disabled = false;

#define hugepd_none(hpd)	(hpd_val(hpd) == 0)

#define PTE_T_ORDER	(__builtin_ffs(sizeof(pte_basic_t)) - \
			 __builtin_ffs(sizeof(void *)))

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	/*
	 * Only called for hugetlbfs pages, hence can ignore THP and the
	 * irq disabled walk.
	 */
	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}
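
/*
 * Allocate a hugepage directory page and make every higher-level entry
 * covered by it point at the new table.  When the hugepage is larger than
 * the area mapped by a single entry (pshift >= pdshift), several
 * consecutive entries share the same hugepd, so they are all populated
 * here under @ptl.
 */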
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int pdshift,
			   unsigned int pshift, spinlock_t *ptl)
{
	struct kmem_cache *cachep;
	pte_t *new;
	int i;
	int num_hugepd;

	if (pshift >= pdshift) {
		cachep = PGT_CACHE(PTE_T_ORDER);
		num_hugepd = 1 << (pshift - pdshift);
	} else {
		cachep = PGT_CACHE(pdshift - pshift);
		num_hugepd = 1;
	}

	if (!cachep) {
		WARN_ONCE(1, "No page table cache created for hugetlb tables");
		return -ENOMEM;
	}

	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	/*
	 * Make sure other cpus find the hugepd set only after a
	 * properly initialized page table is visible to them.
	 * For more details look for comment in __pte_alloc().
	 */
	smp_wmb();

	spin_lock(ptl);
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		hugepd_populate(hpdp, new, pshift);
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			*hpdp = __hugepd(0);
		kmem_cache_free(cachep, new);
	} else {
		kmemleak_ignore(new);
	}
	spin_unlock(ptl);
	return 0;
}

/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	p4d_t *p4;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;
	spinlock_t *ptl;

	addr &= ~(sz - 1);
	pg = pgd_offset(mm, addr);
	p4 = p4d_offset(pg, addr);

#ifdef CONFIG_PPC_BOOK3S_64
	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *)p4;
	else if (pshift > PUD_SHIFT) {
		/*
		 * We need to use hugepd table
		 */
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)p4;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, p4, addr);
		if (!pu)
			return NULL;
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (!pm)
				return NULL;
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else {
				ptl = pmd_lockptr(mm, pm);
				hpdp = (hugepd_t *)pm;
			}
		}
	}
#else
	if (pshift >= PGDIR_SHIFT) {
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)p4;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, p4, addr);
		if (!pu)
			return NULL;
		if (pshift >= PUD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (!pm)
				return NULL;
			ptl = pmd_lockptr(mm, pm);
			hpdp = (hugepd_t *)pm;
		}
	}
#endif
	if (!hpdp)
		return NULL;

	if (IS_ENABLED(CONFIG_PPC_8xx) && pshift < PMD_SHIFT)
		return pte_alloc_map(mm, (pmd_t *)hpdp, addr);

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
						  pdshift, pshift, ptl))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES	1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;

/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif
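
/*
 * Boot-time allocation of a gigantic page: on hash-MMU LPAR (pseries) the
 * page comes from the firmware-provided gpage list collected above,
 * otherwise we fall back to the generic bootmem allocator.
 */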
int __init alloc_bootmem_huge_page(struct hstate *h)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return pseries_alloc_bootmem_huge_page(h);
#endif
	return __alloc_bootmem_huge_page(h);
}
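
/*
 * On non-Book3S-64 platforms a hugepage directory cannot simply be freed
 * while other CPUs may still be walking the page tables: freed hugepds are
 * batched per CPU and released through RCU so concurrent lockless walkers
 * never see them disappear underneath them.
 */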
#ifndef CONFIG_PPC_BOOK3S_64
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head rcu;
	unsigned int index;
	void *ptes[];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);

	free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    mm_is_thread_local(tlb->mm)) {
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif

/* Return true when the entry to be freed maps more than the area being freed */
static bool range_is_outside_limits(unsigned long start, unsigned long end,
				    unsigned long floor, unsigned long ceiling,
				    unsigned long mask)
{
	if ((start & mask) < floor)
		return true;
	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return true;
	}
	return end - 1 > ceiling - 1;
}
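
/*
 * Free the hugepd table referenced by @hpdp and clear every page table
 * entry that points at it (there may be several when the hugepage shift
 * exceeds the directory shift).  The floor/ceiling limits are honoured
 * just as in the generic free_pgd_range().
 */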
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;
	unsigned int shift = hugepd_shift(*hpdp);

	/* Note: On fsl the hpdp may be the first of several */
	if (shift > pdshift)
		num_hugepd = 1 << (shift - pdshift);

	if (range_is_outside_limits(start, end, floor, ceiling, pdmask))
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		*hpdp = __hugepd(0);

	if (shift >= pdshift)
		hugepd_free(tlb, hugepte);
	else
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(pdshift - shift));
}
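
/*
 * Free a PTE-level table that holds huge PTEs; this path is used when huge
 * pages live below the PMD level (the 8xx case checked by the caller).
 */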
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pgtable_t token = pmd_pgtable(*pmd);

	if (range_is_outside_limits(addr, end, floor, ceiling, PMD_MASK))
		return;

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}
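
/*
 * The walkers below mirror the generic free_pmd/pud/pgd_range(): at each
 * level either recurse into a normal page table or hand a hugepd entry to
 * free_hugepd_range(), advancing by the full size of the huge mapping.
 */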
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		unsigned long more;

		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			if (pmd_none_or_clear_bad(pmd))
				continue;

			/*
			 * if it is not hugepd pointer, we should already find
			 * it cleared.
			 */
			WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx));

			hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling);

			continue;
		}
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
		if (more > next)
			next = more;

		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	if (range_is_outside_limits(start, end, floor, ceiling, PUD_MASK))
		return;

	pmd = pmd_offset(pud, start & PUD_MASK);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start & PUD_MASK);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(p4d, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	if (range_is_outside_limits(start, end, floor, ceiling, PGDIR_MASK))
		return;

	pud = pud_offset(p4d, start & PGDIR_MASK);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start & PGDIR_MASK);
	mm_dec_nr_puds(tlb->mm);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		p4d = p4d_offset(pgd, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (p4d_none_or_clear_bad(p4d))
				continue;
			hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)p4d, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}
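
/*
 * follow_page()/GUP helper for hugepd-mapped areas: look up the huge PTE
 * under mm->page_table_lock, return the subpage for @address, and wait for
 * migration entries before retrying.
 */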
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift)
{
	pte_t *ptep;
	spinlock_t *ptl;
	struct page *page = NULL;
	unsigned long mask;
	int shift = hugepd_shift(hpd);
	struct mm_struct *mm = vma->vm_mm;

retry:
	/*
	 * hugepage directory entries are protected by mm->page_table_lock
	 * Use this instead of huge_pte_lockptr
	 */
	ptl = &mm->page_table_lock;
	spin_lock(ptl);

	ptep = hugepte_offset(hpd, address, pdshift);
	if (pte_present(*ptep)) {
		mask = (1UL << shift) - 1;
		page = pte_page(*ptep);
		page += ((address & mask) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(*ptep)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, ptep, ptl);
			goto retry;
		}
	}
	spin_unlock(ptl);
	return page;
}
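
/*
 * mmap placement for hugetlbfs: radix has its own topdown search, while
 * the hash MMU goes through the slice allocator so the mapping lands in a
 * slice of the right page size.
 */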
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
							pgoff, flags);
#endif
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	/* With radix we don't use slice, so derive it from vma */
	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

		return 1UL << mmu_psize_to_shift(psize);
	}
	return vma_kernel_pagesize(vma);
}
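
/*
 * A huge page size is valid when it is a power of two larger than
 * PAGE_SIZE and the MMU reports a matching page size definition.
 */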
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
	if (size <= PAGE_SIZE || !is_power_of_2(size))
		return false;

	mmu_psize = check_and_get_huge_psize(shift);
	if (mmu_psize < 0)
		return false;

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	return true;
}

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);

	if (!arch_hugetlb_valid_size((unsigned long)size))
		return -EINVAL;

	hugetlb_add_hstate(shift - PAGE_SHIFT);
	return 0;
}
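
/*
 * Register every hardware-supported huge page size with the hugetlb core
 * and create the kmem caches used for the corresponding hugepd tables
 * where needed.
 */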
static int __init hugetlbpage_init(void)
{
	bool configured = false;
	int psize;

	if (hugetlb_disabled) {
		pr_info("HugeTLB support is disabled!\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled() &&
	    !mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
		if (shift > PGDIR_SHIFT)
			continue;
		else if (shift > PUD_SHIFT)
			pdshift = PGDIR_SHIFT;
		else if (shift > PMD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PMD_SHIFT;
#else
		if (shift < PUD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PGDIR_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
#endif

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;
		/*
		 * if we have pdshift and shift value same, we don't
		 * use pgt cache for hugepd.
		 */
		if (pdshift > shift) {
			if (!IS_ENABLED(CONFIG_PPC_8xx))
				pgtable_cache_add(pdshift - shift);
		} else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
			   IS_ENABLED(CONFIG_PPC_8xx)) {
			pgtable_cache_add(PTE_T_ORDER);
		}

		configured = true;
	}

	if (configured) {
		if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
			hugetlbpage_init_default();
	} else
		pr_info("Failed to initialize. Disabling HugeTLB");

	return 0;
}

arch_initcall(hugetlbpage_init);
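
/*
 * Flush the data and instruction cache for every subpage of a compound
 * huge page, temporarily mapping highmem subpages with kmap_atomic().
 */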
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < compound_nr(page); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page + i));
		} else {
			start = kmap_atomic(page + i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}
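
/*
 * Reserve CMA for gigantic pages where the platform can use it: PUD-sized
 * pages on radix, or the 16G page size reported by the hash MMU on bare
 * metal; pseries instead relies on the device-tree gpage reservation above.
 */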
void __init gigantic_hugetlb_cma_reserve(void)
{
	unsigned long order = 0;

	if (radix_enabled())
		order = PUD_SHIFT - PAGE_SHIFT;
	else if (!firmware_has_feature(FW_FEATURE_LPAR) && mmu_psize_defs[MMU_PAGE_16G].shift)
		/*
		 * For pseries we do use ibm,expected#pages for reserving 16G pages.
		 */
		order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;

	if (order) {
		VM_WARN_ON(order < MAX_ORDER);
		hugetlb_cma_reserve(order);
	}
}