/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space could be wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
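
/* Physical start of memory and of the kernel image, recorded in early boot. */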
phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;
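
/*
 * Free the pages holding the kernel's __init text and data: poison them
 * first, then hand each page back to the page allocator.
 */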
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk ("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
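/* Release the pages holding the initrd image once it is no longer needed. */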
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

#ifdef CONFIG_PROC_KCORE
static struct kcore_list kcore_vmem;
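
/*
 * Register each LMB memory region, plus the vmalloc area, with
 * /proc/kcore so kernel memory shows up in its ELF core image.
 */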
static int __init setup_kcore(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base, size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __func__);

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END - VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
#endif
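
/* Cache constructor: page table pages must start out zero-filled. */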
static void zero_ctor(struct kmem_cache *cache, void *addr)
{
	memset(addr, 0, kmem_cache_size(cache));
}
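
/* Object sizes and names for the caches backing each page table level. */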
static const unsigned int pgtable_cache_size[2] = {
	PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
	"pgd_cache", "pmd_cache",
#else
	"pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */
};

#ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
 * can't put into the tables above, because HPAGE_SHIFT is not compile
 * time constant. */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif
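
/*
 * Create the kmem caches used to allocate each level of page table;
 * objects are aligned to their size and zeroed on allocation.
 */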
void pgtable_cache_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
		int size = pgtable_cache_size[i];
		const char *name = pgtable_cache_name[i];

		pr_debug("Allocating page table cache %s (#%d) "
			 "for size: %08x...\n", name, i, size);
		pgtable_cache[i] = kmem_cache_create(name,
						     size, size,
						     SLAB_PANIC,
						     zero_ctor);
	}
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}
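
/*
 * Allocate backing blocks for the vmemmap range covering nr_pages struct
 * pages and bolt them into the hash page table, skipping any stretch that
 * an overlapping, already-initialised section has populated.
 */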
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long mode_rw;
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	for (; start < end; start += page_size) {
		int mapped;
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
			 start, p, __pa(p));

		mapped = htab_bolt_mapping(start, start + page_size,
					   __pa(p), mode_rw, mmu_linear_psize,
					   mmu_kernel_ssize);
		BUG_ON(mapped < 0);
	}

	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */