/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"
#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);
static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pud_ctor(void *addr)
{
	memset(addr, 0, PUD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE);
}
struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	kfree(name);
	pgtable_cache[shift - 1] = new;
	pr_debug("Allocated pgtable cache for order %d\n", shift);
}
void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
	/*
	 * In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index except with THP enabled
	 * on book3s 64
	 */
	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
		pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);

	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
		panic("Couldn't allocate pgtable caches");
	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
		panic("Couldn't allocate pud pgtable caches");
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
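/*
 * Worked example of the arithmetic above, with illustrative values:
 * assuming sizeof(struct page) == 64 and PAGES_PER_SECTION == 0x10000,
 * an address 64 * 0x12345 bytes past vmemmap yields
 * offset / sizeof(struct page) == 0x12345, and masking with
 * PAGE_SECTION_MASK (~(PAGES_PER_SECTION - 1)) rounds that down to
 * 0x10000, the first pfn of the enclosing section.
 */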
/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}
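/*
 * In other words, the loop above strides through the struct pages
 * covered by this vmemmap page one sparsemem section at a time, and
 * reports the page as populated as soon as any overlapping section
 * has a valid pfn.
 */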
/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different
 */
#ifdef CONFIG_PPC_BOOK3E
static int __meminit vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things. Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));

	return 0;
}
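/*
 * A sketch of the result, assuming a 16M vmemmap page size: every 4K
 * PTE slot in [start, start + 16M) holds the same translation (phys
 * with the low bits clear) plus the 16M size encoding, so the TLB
 * miss code can rebuild the large mapping from whichever PTE it
 * happens to load.
 */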
#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
}
#endif
#else /* CONFIG_PPC_BOOK3E */
static int __meminit vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	int rc = htab_bolt_mapping(start, start + page_size, phys,
				   pgprot_val(PAGE_KERNEL),
				   mmu_vmemmap_psize, mmu_kernel_ssize);
	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, start + page_size,
					      mmu_vmemmap_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}
#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
	int rc = htab_remove_mapping(start, start + page_size,
				     mmu_vmemmap_psize,
				     mmu_kernel_ssize);
	BUG_ON((rc < 0) && (rc != -ENOENT));
	WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_PPC_BOOK3E */

struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;
static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;
	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}
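/*
 * Allocation strategy above: recycle entries from the 'next' free
 * list first (num_freed tracks how many are available); otherwise
 * carve PAGE_SIZE / sizeof(struct vmemmap_backing) chunks out of a
 * freshly allocated page and hand them out one at a time via next++.
 */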
static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}
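/*
 * vmemmap_list is therefore a simple push-down stack: the newest
 * backing entry becomes the head, and lookups such as
 * realmode_pfn_to_page() below must walk the whole list, since
 * entries are in no particular address order.
 */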
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warning(
				"vmemmap_populate: Unable to create vmemmap mapping: %d\n",
				rc);
			return -EFAULT;
		}
	}

	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* next point to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}
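/*
 * Removal above is a single forward pass with a trailing prev
 * pointer; the unlinked entry is not returned to the page allocator
 * but pushed onto the 'next' free list for reuse by
 * vmemmap_list_alloc().
 */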
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long addr;

		/*
		 * The section has already been marked as invalid, so
		 * vmemmap_populated() returning true means some other
		 * sections still live in this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (addr) {
			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

			if (PageReserved(page)) {
				/* allocated from bootmem */
				if (page_size < PAGE_SIZE) {
					/*
					 * this shouldn't happen, but if it is
					 * the case, leave the memory there
					 */
					WARN_ON_ONCE(1);
				} else {
					unsigned int nr_pages =
						1 << get_order(page_size);
					while (nr_pages--)
						free_reserved_page(page++);
				}
			} else {
				free_pages((unsigned long)(__va(addr)),
					   get_order(page_size));
			}

			vmemmap_remove_mapping(start, page_size);
		}
	}
}
#endif
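/*
 * vmemmap_free() above distinguishes two cases: blocks that came from
 * the bootmem/memblock allocator are PageReserved and are released
 * page by page with free_reserved_page(), while buddy-allocated
 * blocks go back in one go via free_pages() at the order of the
 * mapping size.
 */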
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}
/*
 * We do not have access to the sparsemem vmemmap, so we fallback to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump. In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in real mode),
 * the requested page struct can be split between blocks so get_page/put_page
 * may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* vmemmap_list entries can have been freed and reused,
		 * so the list is unordered and we need to check all of
		 * them */
		if ((pg_va + sizeof(struct page)) <=
				(vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
				vmem_back->virt_addr);
			return page;
		}
	}

	/* The page struct is probably split between real pages */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
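/*
 * The pointer computed above is usable with the MMU off because
 * vmem_back->phys is a physical address and pg_va - vmem_back->virt_addr
 * is the offset of the struct page within its backing block; callers
 * running in real mode (e.g. KVM's real-mode handlers) can therefore
 * reach the struct page without the vmemmap virtual mapping.
 */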
#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */