/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>
#include <asm/firmware.h>
29 bool hugetlb_disabled
= false;
31 #define PTE_T_ORDER (__builtin_ffs(sizeof(pte_basic_t)) - \
32 __builtin_ffs(sizeof(void *)))
34 pte_t
*huge_pte_offset(struct mm_struct
*mm
, unsigned long addr
, unsigned long sz
)
37 * Only called for hugetlbfs pages, hence can ignore THP and the
40 return __find_linux_pte(mm
->pgd
, addr
, NULL
, NULL
);
43 pte_t
*huge_pte_alloc(struct mm_struct
*mm
, struct vm_area_struct
*vma
,
44 unsigned long addr
, unsigned long sz
)
52 p4d
= p4d_offset(pgd_offset(mm
, addr
), addr
);
53 if (!mm_pud_folded(mm
) && sz
>= P4D_SIZE
)
56 pud
= pud_alloc(mm
, p4d
, addr
);
59 if (!mm_pmd_folded(mm
) && sz
>= PUD_SIZE
)
62 pmd
= pmd_alloc(mm
, pud
, addr
);
67 /* On 8xx, all hugepages are handled as contiguous PTEs */
68 if (IS_ENABLED(CONFIG_PPC_8xx
)) {
71 for (i
= 0; i
< sz
/ PMD_SIZE
; i
++) {
72 if (!pte_alloc_huge(mm
, pmd
+ i
, addr
))
79 return pte_alloc_huge(mm
, pmd
, addr
);
82 #ifdef CONFIG_PPC_BOOK3S_64
84 * Tracks gpages after the device tree is scanned and before the
85 * huge_boot_pages list is ready on pseries.
87 #define MAX_NUMBER_GPAGES 1024
88 __initdata
static u64 gpage_freearray
[MAX_NUMBER_GPAGES
];
89 __initdata
static unsigned nr_gpages
;
92 * Build list of addresses of gigantic pages. This function is used in early
93 * boot before the buddy allocator is setup.
95 void __init
pseries_add_gpage(u64 addr
, u64 page_size
, unsigned long number_of_pages
)
99 while (number_of_pages
> 0) {
100 gpage_freearray
[nr_gpages
] = addr
;
107 static int __init
pseries_alloc_bootmem_huge_page(struct hstate
*hstate
)
109 struct huge_bootmem_page
*m
;
112 m
= phys_to_virt(gpage_freearray
[--nr_gpages
]);
113 gpage_freearray
[nr_gpages
] = 0;
114 list_add(&m
->list
, &huge_boot_pages
[0]);
119 bool __init
hugetlb_node_alloc_supported(void)
126 int __init
alloc_bootmem_huge_page(struct hstate
*h
, int nid
)
129 #ifdef CONFIG_PPC_BOOK3S_64
130 if (firmware_has_feature(FW_FEATURE_LPAR
) && !radix_enabled())
131 return pseries_alloc_bootmem_huge_page(h
);
133 return __alloc_bootmem_huge_page(h
, nid
);
136 bool __init
arch_hugetlb_valid_size(unsigned long size
)
138 int shift
= __ffs(size
);
141 /* Check that it is a page size supported by the hardware and
142 * that it fits within pagetable and slice limits. */
143 if (size
<= PAGE_SIZE
|| !is_power_of_2(size
))
146 mmu_psize
= check_and_get_huge_psize(shift
);
150 BUG_ON(mmu_psize_defs
[mmu_psize
].shift
!= shift
);
155 static int __init
add_huge_page_size(unsigned long long size
)
157 int shift
= __ffs(size
);
159 if (!arch_hugetlb_valid_size((unsigned long)size
))
162 hugetlb_add_hstate(shift
- PAGE_SHIFT
);
166 static int __init
hugetlbpage_init(void)
168 bool configured
= false;
171 if (hugetlb_disabled
) {
172 pr_info("HugeTLB support is disabled!\n");
176 if (IS_ENABLED(CONFIG_PPC_BOOK3S_64
) && !radix_enabled() &&
177 !mmu_has_feature(MMU_FTR_16M_PAGE
))
180 for (psize
= 0; psize
< MMU_PAGE_COUNT
; ++psize
) {
183 if (!mmu_psize_defs
[psize
].shift
)
186 shift
= mmu_psize_to_shift(psize
);
188 if (add_huge_page_size(1ULL << shift
) < 0)
195 pr_info("Failed to initialize. Disabling HugeTLB");
arch_initcall(hugetlbpage_init);
202 void __init
gigantic_hugetlb_cma_reserve(void)
204 unsigned long order
= 0;
207 order
= PUD_SHIFT
- PAGE_SHIFT
;
208 else if (!firmware_has_feature(FW_FEATURE_LPAR
) && mmu_psize_defs
[MMU_PAGE_16G
].shift
)
210 * For pseries we do use ibm,expected#pages for reserving 16G pages.
212 order
= mmu_psize_to_shift(MMU_PAGE_16G
) - PAGE_SHIFT
;
215 hugetlb_cma_reserve(order
);