/*
 * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/dma-map-ops.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>	/* mem_init */
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Use for MMU and noMMU because of PCI generic code */
int mem_init_done;

char *klimit = _end;

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;

EXPORT_SYMBOL(min_low_pfn);
EXPORT_SYMBOL(max_low_pfn);

#ifdef CONFIG_HIGHMEM
static void __init highmem_init(void)
{
	pr_debug("%x\n", (u32)PKMAP_BASE);
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
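	/*
	 * Note: the dummy mapping above forces the page table that covers
	 * PKMAP_BASE to be allocated so the PTE lookup below succeeds;
	 * kmap() installs real highmem mappings through this table later.
	 */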
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
}

static void highmem_setup(void)
{
	unsigned long pfn;

	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* FIXME: not sure this reservation check is sufficient */
		if (!memblock_is_reserved(pfn << PAGE_SHIFT))
			free_highmem_page(page);
	}
}
#endif /* CONFIG_HIGHMEM */

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
static void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	int idx;

	/* Setup fixmaps */
	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
		clear_fixmap(idx);

	/* Clear every zone */
	memset(zones_size, 0, sizeof(zones_size));
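
	/*
	 * NB: in recent kernels free_area_init() takes per-zone *maximum
	 * PFN* limits, so despite its name each zones_size[] entry below
	 * holds the zone's upper PFN bound, not a size in pages.
	 */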
#ifdef CONFIG_HIGHMEM
	highmem_init();

	zones_size[ZONE_DMA] = max_low_pfn;
	zones_size[ZONE_HIGHMEM] = max_pfn;
#else
	zones_size[ZONE_DMA] = max_pfn;
#endif

	/* We don't have holes in the memory map */
	free_area_init(zones_size);
}

void __init setup_memory(void)
{
	/*
	 * Kernel:
	 * start: base phys address of kernel - page align
	 * end: base phys address of kernel - page align
	 *
	 * min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
	 * max_low_pfn - the last page of the directly mapped (lowmem) region
	 * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
	 */

	/* memory start is from the kernel end (aligned) to higher addr */
	min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
	/* RAM is assumed contiguous */
	max_mapnr = memory_size >> PAGE_SHIFT;
	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;
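
	/*
	 * Worked example with hypothetical values, assuming 4 KiB pages
	 * (PAGE_SHIFT == 12): RAM at 0x80000000, memory_size = 1 GiB and
	 * lowmem_size = 768 MiB give
	 *   min_low_pfn = 0x80000000 >> 12 = 0x80000
	 *   max_low_pfn = (0x80000000 + 0x30000000) >> 12 = 0xb0000
	 *   max_pfn     = (0x80000000 + 0x40000000) >> 12 = 0xc0000
	 * The u64 casts keep the sums from wrapping when RAM ends exactly
	 * at the 4 GiB boundary.
	 */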

	pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
	pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);

	paging_init();
}

void __init mem_init(void)
{
	high_memory = (void *)__va(memory_start + lowmem_size - 1);
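	/*
	 * high_memory marks the end of the kernel's direct (lowmem)
	 * mapping; pages beyond it are reachable only through kmap().
	 */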

	/* this will put all memory onto the freelists */
	memblock_free_all();
#ifdef CONFIG_HIGHMEM
	highmem_setup();
#endif

	mem_init_print_info(NULL);

	mem_init_done = 1;
}

int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

/*
 * Check for command-line options that affect what mmu_init() will do.
 */
static void mm_cmdline_setup(void)
{
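	/*
	 * Example with hypothetical values: booting with "mem=256M" on a
	 * 512 MB board trims memory_size to 0x10000000 bytes; memparse()
	 * understands suffixes such as K, M and G.
	 */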
	unsigned long maxmem = 0;
	char *p = cmd_line;

	/* Look for mem= option on command line */
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memblock.memory.regions[0].size = memory_size;
		}
	}
}

/*
 * mmu_init_hw() does the chip-specific initialization of the MMU hardware.
 */
static void __init mmu_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the zones.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB. For user access, zone 1 is used,
	 * for kernel access, zone 0 is used. We set all but zone 1
	 * to zero, allowing only kernel access as indicated in the PTE.
	 * For zone 1, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE. This also allows
	 * kernel access as indicated in the PTE.
	 */
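	/*
	 * 0x10000000 encodes that layout: each zone takes two bits, with
	 * zone 0 in the top bits, so zone 0 stays 00 (kernel-only) and
	 * zone 1 becomes 01 (user access as allowed by the PTE).
	 */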
	__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
			"mts rzpr, r11;"
			: : : "r11");
}

/*
 * mmu_init() sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */

/* called from head.S */
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	if (!memblock.reserved.cnt) {
		pr_emerg("Error memory count\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < 0x400000) {
		pr_emerg("Memory must be greater than 4MB\n");
		machine_restart(NULL);
	}
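
	/*
	 * kernel_tlb (set up in head.S) is the number of bytes of RAM
	 * covered by the fixed boot-time TLB entries that map the kernel.
	 */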
	if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
		pr_emerg("Kernel size is greater than memory node\n");
		machine_restart(NULL);
	}

	/* Find main memory where the kernel is */
	memory_start = (u32) memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		lowmem_size = CONFIG_LOWMEM_SIZE;
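		/*
		 * Without HIGHMEM, RAM above the lowmem limit would have
		 * no kernel mapping at all, so shrink memory_size to
		 * match; with HIGHMEM the excess is freed into the
		 * highmem zone by highmem_setup() instead.
		 */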
#ifndef CONFIG_HIGHMEM
		memory_size = lowmem_size;
#endif
	}

	mm_cmdline_setup();	/* FIXME parse args from command line - not used */

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
	/* kernel size */
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	memblock_reserve(kstart, ksize);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
	if (initrd_start) {
		unsigned long size = initrd_end - initrd_start;

		memblock_reserve(__virt_to_phys(initrd_start), size);
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	/* Initialize the MMU hardware */
	mmu_init_hw();

	/* Map in all of RAM starting at CONFIG_KERNEL_START */
	mapin_ram();

	/* Extend vmalloc and ioremap area as big as possible */
#ifdef CONFIG_HIGHMEM
	ioremap_base = ioremap_bot = PKMAP_BASE;
#else
	ioremap_base = ioremap_bot = FIXADDR_START;
#endif

	/* Initialize the context management stuff */
	mmu_context_init();

	/* Shortly after this, the entire linear mapping will be available */
	/*
	 * This also ensures the unflattened device tree gets allocated
	 * below the 768MB lowmem limit.
	 */
	memblock_set_current_limit(memory_start + lowmem_size - 1);

	parse_early_param();

	/* CMA initialization */
	dma_contiguous_reserve(memory_start + lowmem_size - 1);
}

/* This is only called until mem_init is done. */
void __init *early_get_page(void)
{
	/*
	 * The limit is memory_start + kernel_tlb, because that is all the
	 * memory mapped so far by head.S.
	 */
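	/*
	 * Note: the _raw variant does not zero the page; the caller is
	 * expected to initialize it.
	 */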
	return memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
				MEMBLOCK_LOW_LIMIT, memory_start + kernel_tlb,
				NUMA_NO_NODE);
}

void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
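	/*
	 * Before mem_init() runs, the slab allocator is not available,
	 * so fall back to memblock. memblock_alloc() returns zeroed
	 * memory, matching the kzalloc() semantics of the normal path.
	 */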
	void *p;

	if (mem_init_done)
		p = kzalloc(size, mask);
	else {
		p = memblock_alloc(size, SMP_CACHE_BYTES);
		if (!p)
			panic("%s: Failed to allocate %zu bytes\n",
			      __func__, size);
	}

	return p;
}