/*
 * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/dma-map-ops.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h> /* mem_init */
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
/* Use for MMU and noMMU because of PCI generic code */
int mem_init_done;

char *klimit = _end;
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;

EXPORT_SYMBOL(min_low_pfn);
EXPORT_SYMBOL(max_low_pfn);
#ifdef CONFIG_HIGHMEM
static void __init highmem_init(void)
{
	pr_debug("%x\n", (u32)PKMAP_BASE);
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
}
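/*
 * Hand every non-reserved highmem page (pfns above max_low_pfn) over to
 * the buddy allocator; called from mem_init() once the lowmem freelists
 * have been populated.
 */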
static void __meminit highmem_setup(void)
{
	unsigned long pfn;

	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* FIXME not sure about this check */
		if (!memblock_is_reserved(pfn << PAGE_SHIFT))
			free_highmem_page(page);
	}
}
#endif /* CONFIG_HIGHMEM */
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
static void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	int idx;

	/* Setup fixmaps */
	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
		clear_fixmap(idx);

	/* Clear every zone */
	memset(zones_size, 0, sizeof(zones_size));

#ifdef CONFIG_HIGHMEM
	highmem_init();

	zones_size[ZONE_DMA] = max_low_pfn;
	zones_size[ZONE_HIGHMEM] = max_pfn;
#else
	zones_size[ZONE_DMA] = max_pfn;
#endif

	/* We don't have holes in the memory map */
	free_area_init(zones_size);
}
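/*
 * setup_memory() derives the global pfn limits from memory_start,
 * lowmem_size and memory_size and then calls paging_init().
 *
 * Illustrative example (hypothetical values, assuming 4 KiB pages, i.e.
 * PAGE_SHIFT == 12): with memory_start = 0x80000000,
 * lowmem_size = 0x30000000 and memory_size = 0x40000000 we get
 * min_low_pfn = 0x80000, max_low_pfn = 0xb0000 and max_pfn = 0xc0000.
 */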
void __init setup_memory(void)
{
	/*
	 * Kernel:
	 * start: base phys address of kernel - page align
	 * end: end phys address of kernel - page align
	 *
	 * min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
	 * max_low_pfn
	 * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
	 */

	/* memory start is from the kernel end (aligned) to higher addr */
	min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
	/* RAM is assumed contiguous */
	max_mapnr = memory_size >> PAGE_SHIFT;
	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;

	pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
	pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);

	paging_init();
}
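/*
 * mem_init() releases all boot memory to the buddy allocator: lowmem via
 * memblock_free_all(), highmem (if configured) via highmem_setup().
 */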
void __init mem_init(void)
{
	high_memory = (void *)__va(memory_start + lowmem_size - 1);

	/* this will put all memory onto the freelists */
	memblock_free_all();
#ifdef CONFIG_HIGHMEM
	highmem_setup();
#endif

	mem_init_done = 1;
}
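/*
 * Only lowmem pfns are reported as RAM here; pfns at or above max_low_pfn
 * (highmem) return false.
 */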
int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}
/*
 * Check for command-line options that affect what MMU_init will do.
 */
static void mm_cmdline_setup(void)
{
	unsigned long maxmem = 0;
	char *p = cmd_line;

	/* Look for mem= option on command line */
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memblock.memory.regions[0].size = memory_size;
		}
	}
}
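/*
 * Example with hypothetical values: booting with "mem=64M" makes
 * memparse() return 0x4000000, so a board describing 128 MB of RAM would
 * have memory_size and the first memblock region clamped to 64 MB.
 */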
/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
static void __init mmu_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the zones.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB. For user access, zone 1 is used,
	 * for kernel access, zone 0 is used. We set all but zone 1
	 * to zero, allowing only kernel access as indicated in the PTE.
	 * For zone 1, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE. This also allows
	 * kernel access as indicated in the PTE.
	 */
	__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
			"mts rzpr, r11;"
			: : : "r11");
}
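/*
 * Note on the constant: in 0x10000000 the top 2-bit field (zone 0) is
 * 2'b00 and the next field (zone 1) is 2'b01, which matches the comment
 * above; all remaining zone fields are zero.
 */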
/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */

/* called from head.S */
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	if ((u32) memblock.memory.regions[0].size < 0x400000) {
		pr_emerg("Memory must be greater than 4MB\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
		pr_emerg("Kernel size is greater than memory node\n");
		machine_restart(NULL);
	}
	/* Find main memory where the kernel is */
	memory_start = (u32) memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		lowmem_size = CONFIG_LOWMEM_SIZE;
#ifndef CONFIG_HIGHMEM
		memory_size = lowmem_size;
#endif
	}
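	/*
	 * Without CONFIG_HIGHMEM anything above CONFIG_LOWMEM_SIZE is simply
	 * dropped here; with it, the excess stays in memory_size and is
	 * freed later by highmem_setup().
	 */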
	mm_cmdline_setup();	/* FIXME parse args from command line - not used */

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
	/* kernel size */
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	memblock_reserve(kstart, ksize);
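	/*
	 * Example with hypothetical addresses: for CONFIG_KERNEL_START =
	 * 0xc0000000 and _end = 0xc0400123, ksize = PAGE_ALIGN(0x400123) =
	 * 0x401000, so just over 4 MB is reserved for the kernel image.
	 */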
#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
	if (initrd_start) {
		unsigned long size;
		size = initrd_end - initrd_start;
		memblock_reserve(__virt_to_phys(initrd_start), size);
	}
#endif /* CONFIG_BLK_DEV_INITRD */
	/* Initialize the MMU hardware */
	mmu_init_hw();

	/* Map in all of RAM starting at CONFIG_KERNEL_START */
	mapin_ram();

	/* Extend vmalloc and ioremap area as big as possible */
#ifdef CONFIG_HIGHMEM
	ioremap_base = ioremap_bot = PKMAP_BASE;
#else
	ioremap_base = ioremap_bot = FIXADDR_START;
#endif
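	/*
	 * With highmem the kmap window (PKMAP_BASE) sits below the fixmap
	 * area, so vmalloc/ioremap must stop below it; otherwise they can
	 * run all the way up to FIXADDR_START.
	 */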
	/* Initialize the context management stuff */
	mmu_context_init();

	/* Shortly after that, the entire linear mapping will be available */
	/*
	 * This also causes the unflattened device tree to be allocated
	 * within the lowmem limit (768 MB by default).
	 */
	memblock_set_current_limit(memory_start + lowmem_size - 1);

	parse_early_param();

	early_init_fdt_scan_reserved_mem();

	/* CMA initialization */
	dma_contiguous_reserve(memory_start + lowmem_size - 1);

	memblock_dump_all();
}
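/*
 * Architecture-specific vm_flags -> page protection table, indexed by the
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of a VMA. Private writable
 * mappings get the copy-on-write PAGE_COPY* variants, shared writable
 * mappings get PAGE_SHARED*.
 */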
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY_X,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY_X,
	[VM_EXEC]					= PAGE_READONLY,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY_X,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED_X,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
};
DECLARE_VM_GET_PAGE_PROT