/*
 *  linux/arch/unicore32/mm/init.c
 *
 *  Copyright (C) 2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>

#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <mach/map.h>

#include "mm.h"
static unsigned long phys_initrd_start __initdata = 0x01000000;
static unsigned long phys_initrd_size __initdata = SZ_8M;
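
/*
 * Parse the "initrd=<start>,<size>" kernel command line option, e.g.
 * "initrd=0x01000000,8M".  memparse() accepts the usual K/M/G size
 * suffixes.  The defaults above apply when no option is given.
 */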
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by uc32_add_memory().
 */
struct meminfo meminfo;

void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo *mi = &meminfo;

	printk(KERN_DEFAULT "Mem-info:\n");
	show_free_areas(filter);

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk(KERN_DEFAULT "%d pages of RAM\n", total);
	printk(KERN_DEFAULT "%d free pages\n", free);
	printk(KERN_DEFAULT "%d reserved pages\n", reserved);
	printk(KERN_DEFAULT "%d slab pages\n", slab);
	printk(KERN_DEFAULT "%d pages shared\n", shared);
	printk(KERN_DEFAULT "%d pages swap cached\n", cached);
}
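
/*
 * Walk the registered memory banks and record the lowest PFN seen
 * (*min), the highest lowmem PFN (*max_low) and the highest PFN
 * overall (*max_high).
 */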
static void __init find_limits(unsigned long *min, unsigned long *max_low,
	unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}
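
/*
 * Bring up the bootmem allocator for node 0 over [start_pfn, end_pfn),
 * seeding it from the memblock memory and reserved region lists.
 */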
static void __init uc32_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}

	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		reserve_bootmem(__pfn_to_phys(start),
			(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}
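
/*
 * Hand the memory layout over to the zone allocator: a single lowmem
 * zone (zone 0) of max_low - min pages, with any holes between
 * memblock regions subtracted from zhole_size[0].
 */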
static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
	}

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(zone_size, zhole_size);

	free_area_init_node(0, zone_size, min, zhole_size);
}
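
/*
 * A PFN is valid iff its physical address lies within a memblock
 * memory region.
 */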
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
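
/*
 * Deliberately empty; presumably this port has no sparsemem sections
 * to register here (see the sparsemem note in bootmem_init() below).
 */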
static void uc32_memory_present(void)
{
}

/* Order memory banks by ascending start PFN; used by sort() below. */
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init uc32_memblock_init(struct meminfo *mi)
{
	int i;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
		meminfo_cmp, NULL);

	memblock_init();
	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(_text), _end - _text);

#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	uc32_mm_memblock_reserve();

	memblock_analyze();
	memblock_dump_all();
}
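
/*
 * Top-level boot-time memory set-up: find the PFN limits, bring up the
 * bootmem allocator, initialise sparsemem, then populate the zone lists.
 */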
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	uc32_bootmem_init(min, max_low);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(1);
#endif
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations.
	 */
	uc32_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	uc32_bootmem_free(min, max_low, max_high);

	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}
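
/*
 * Return the given PFN range to the page allocator and report how much
 * was freed; (end - pfn) << (PAGE_SHIFT - 10) converts pages to KiB.
 */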
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in uc32_memblock_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;
	int i;

	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	totalram_pages += free_all_bootmem();

	reserved_pages = free_pages = 0;

	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system.
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT - 10),
		free_pages << (PAGE_SHIFT - 10),
		reserved_pages << (PAGE_SHIFT - 10),
		totalhigh_pages << (PAGE_SHIFT - 10));

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
		"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%p - 0x%p   (%4d kB)\n"
		"      .text : 0x%p - 0x%p   (%4d kB)\n"
		"      .data : 0x%p - 0x%p   (%4d kB)\n",
		VECTORS_BASE, VECTORS_BASE + PAGE_SIZE,
		DIV_ROUND_UP(PAGE_SIZE, SZ_1K),
		VMALLOC_START, VMALLOC_END,
		DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M),
		PAGE_OFFSET, (unsigned long)high_memory,
		DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M),
		MODULES_VADDR, MODULES_END,
		DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M),
		__init_begin, __init_end,
		DIV_ROUND_UP((__init_end - __init_begin), SZ_1K),
		_stext, _etext,
		DIV_ROUND_UP((_etext - _stext), SZ_1K),
		_sdata, _edata,
		DIV_ROUND_UP((_edata - _sdata), SZ_1K));

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
				    __phys_to_pfn(__pa(__init_end)),
				    "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
}
static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif