/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>

#include <asm/mach-types.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"
static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
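/*
 * Usage example (illustrative values): a command line containing
 * "initrd=0x60800000,8M" would be handled by early_initrd() above;
 * memparse() accepts the usual K/M/G size suffixes for both fields.
 */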
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
{
	phys_initrd_start = start;
	phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */
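/*
 * When booting from a flattened device tree, the generic FDT code reads
 * the "linux,initrd-start" and "linux,initrd-end" properties from the
 * /chosen node and passes the range to the hook above.
 */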
/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;
void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo *mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas(filter);

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
static void __init find_limits(unsigned long *min, unsigned long *max_low,
	unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}

	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		reserve_bootmem(__pfn_to_phys(start),
				(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}
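/*
 * Note: the bootmem bitmap allocated with memblock_alloc_base() above is
 * itself recorded in memblock.reserved, so the second loop also marks the
 * bitmap as reserved in bootmem; no separate reservation is required.
 */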
#ifdef CONFIG_ZONE_DMA

unsigned long arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
u32 arm_dma_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif
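/*
 * Worked example (illustrative numbers): with 128MB of lowmem in size[0]
 * and arm_dma_zone_size of 64MB, the split above yields 64MB worth of
 * pages in ZONE_DMA and 64MB in ZONE_NORMAL, with any existing hole
 * accounted entirely to ZONE_NORMAL.
 */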
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size) {
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif
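/*
 * Delegating pfn_valid() to memblock means validity no longer implies
 * that a memmap entry exists for every pfn, which is what allows
 * free_unused_memmap() below to return inter-bank memmap pages to the
 * allocator.
 */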
#ifndef CONFIG_SPARSEMEM
static void arm_memory_present(void)
{
}
#else
static void arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif
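/*
 * The comparator below deliberately collapses the pfn difference to
 * -1/0/1: the subtraction is done in a long, and returning only its sign
 * avoids truncation when sort() consumes the int return value.
 */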
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	int i;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	memblock_init();
	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();
	arm_dt_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	memblock_analyze();
	memblock_dump_all();
}
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	arm_bootmem_init(min, max_low);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	arm_bootmem_free(min, max_low, max_high);

	high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}
/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}
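/*
 * On the poison value: in ARM state 0xe7fddef0 lies in the undefined
 * instruction space; in Thumb state the low halfword 0xdef0 (executed
 * first, being little-endian) is permanently undefined and 0xe7fd
 * branches back to it.  The loop assumes count is a multiple of 4,
 * which holds for the word-aligned init sections passed by the
 * callers below.
 */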
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}
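/*
 * The opposite roundings above are deliberate: only whole pages of the
 * memmap array lying entirely within the unused range are freed, since
 * the partial pages at either edge still hold struct pages that belong
 * to neighbouring banks.
 */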
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		bank_start = min(bank_start,
				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
		free_memmap(prev_bank_end,
			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				totalhigh_pages += free_area(start, res_start,
							     NULL);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			totalhigh_pages += free_area(start, end, NULL);
	}
	totalram_pages += totalhigh_pages;
#endif
}
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;
	int i;
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

	free_highpages();

	reserved_pages = free_pages = 0;

	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
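	/*
	 * Each helper expands to a (base, top, size) triple for one line
	 * of the layout dump below: MLK reports sizes in kB, MLM in MB,
	 * and MLK_ROUNDUP rounds up for sections whose size is not a
	 * whole number of kB.
	 */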
	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
			MLM(MODULES_VADDR, MODULES_END),

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP
	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
				    __phys_to_pfn(__pa(&__tcm_end)),
				    "TCM link");
#endif

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
					    __phys_to_pfn(__pa(__init_end)),
					    "init");
}
#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif