Linux 3.17-rc2
linux/fpc-iii.git: arch/powerpc/mm/init_64.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"
#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */
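/*
 * Physical address of the start of system memory and of the kernel image;
 * both are filled in during early boot (descriptive note added here, not
 * part of the original source).
 */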
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);
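/*
 * Slab constructors for the pagetable caches created below: freshly
 * allocated PGD and PMD pages must start out zeroed.  With
 * CONFIG_TRANSPARENT_HUGEPAGE the PMD cache objects are twice
 * PMD_TABLE_SIZE, so both halves are cleared.
 */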
static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	memset(addr, 0, PMD_TABLE_SIZE * 2);
#else
	memset(addr, 0, PMD_TABLE_SIZE);
#endif
}
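/*
 * One cache per possible pagetable index size; slots are indexed by
 * shift - 1 (see PGT_CACHE() and pgtable_cache_add() below).
 */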
struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	pgtable_cache[shift - 1] = new;
	pr_debug("Allocated pgtable cache for order %d\n", shift);
}
void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
		panic("Couldn't allocate pgtable caches");
	/* In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index.  Verify that the
	 * initialization above has also created a PUD cache.  This
	 * will need re-examination if we add new possibilities for
	 * the pagetable layout. */
	BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}
/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */
#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things. Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));
}
#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
	/* Empty on Book3E for now: nothing is torn down here. */
}
#endif
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	int mapped = htab_bolt_mapping(start, start + page_size, phys,
				       pgprot_val(PAGE_KERNEL),
				       mmu_vmemmap_psize,
				       mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#ifdef CONFIG_MEMORY_HOTPLUG
extern int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			       int psize, int ssize);

static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
	int mapped = htab_remove_mapping(start, start + page_size,
					 mmu_vmemmap_psize,
					 mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#endif

#endif /* CONFIG_PPC_BOOK3E */
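/*
 * Bookkeeping for the physical memory backing the vmemmap: every block
 * mapped by vmemmap_populate() gets a vmemmap_backing entry recording its
 * physical address, so vmemmap_free() and realmode_pfn_to_page() (below)
 * can walk the list later.  Entries are carved out of whole pages by
 * vmemmap_list_alloc() and recycled through "next"/"num_freed".
 */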
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;
static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;
	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}
static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		vmemmap_create_mapping(start, page_size, __pa(p));
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
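/*
 * Unlink the vmemmap_list entry for the block starting at @start and put
 * it on the freed-entry list; returns the physical address that backed the
 * block, or 0 if no entry was found.
 */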
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it, keeping track of the previous entry */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make "next" point to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}
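/*
 * Undo vmemmap_populate() for [start, end): for each mapped block that no
 * longer covers a valid section, return the backing memory to wherever it
 * came from (bootmem-reserved pages or the page allocator) and tear down
 * the kernel mapping.
 */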
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long addr;

		/*
		 * The section has already been marked invalid, so if
		 * vmemmap_populated() returns true some other section
		 * still uses this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (addr) {
			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

			if (PageReserved(page)) {
				/* allocated from bootmem */
				if (page_size < PAGE_SIZE) {
					/*
					 * This shouldn't happen, but if it
					 * does, leave the memory there.
					 */
					WARN_ON_ONCE(1);
				} else {
					unsigned int nr_pages =
						1 << get_order(page_size);
					while (nr_pages--)
						free_reserved_page(page++);
				}
			} else
				free_pages((unsigned long)(__va(addr)),
					   get_order(page_size));

			vmemmap_remove_mapping(start, page_size);
		}
	}
}
#endif
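/*
 * Hook expected by the generic sparsemem/memory-hotplug code; currently an
 * empty stub on powerpc.
 */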
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}
/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump. In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in real mode),
 * the requested page struct can be split between blocks so get_page/put_page
 * may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* Entries may have been freed from vmemmap_list, so check them all */
		if ((pg_va + sizeof(struct page)) <=
				(vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
					vmem_back->virt_addr);
			return page;
		}
	}

	/* The page struct is probably split between real pages */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */