arch/metag/mm/init.c

/*
 * Copyright (C) 2005,2006,2007,2008,2009,2010 Imagination Technologies
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/sched/task.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/user_gateway.h>
#include <asm/mmzone.h>
#include <asm/fixmap.h>

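/* Base PFN of system memory (used as ARCH_PFN_OFFSET). */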
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;

unsigned long empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

extern char __user_gateway_start;
extern char __user_gateway_end;

void *gateway_page;

/*
 * Insert the gateway page into a set of page tables, creating the
 * page tables if necessary.
 */
static void insert_gateway_page(pgd_t *pgd, unsigned long address)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        BUG_ON(!pgd_present(*pgd));

        pud = pud_offset(pgd, address);
        BUG_ON(!pud_present(*pud));

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd)) {
                pte = alloc_bootmem_pages(PAGE_SIZE);
                set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
        }

        pte = pte_offset_kernel(pmd, address);
        set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY));
}

/* Alloc and map a page in a known location accessible to userspace. */
static void __init user_gateway_init(void)
{
        unsigned long address = USER_GATEWAY_PAGE;
        int offset = pgd_index(address);
        pgd_t *pgd;

        gateway_page = alloc_bootmem_pages(PAGE_SIZE);

        pgd = swapper_pg_dir + offset;
        insert_gateway_page(pgd, address);

#ifdef CONFIG_METAG_META12
        /*
         * Insert the gateway page into our current page tables even
         * though we've already inserted it into our reference page
         * table (swapper_pg_dir). This is because with a META1 mmu we
         * copy just the user address range and not the gateway page
         * entry on context switch, see switch_mmu().
         */
        pgd = (pgd_t *)mmu_get_base() + offset;
        insert_gateway_page(pgd, address);
#endif /* CONFIG_METAG_META12 */

        BUG_ON((&__user_gateway_end - &__user_gateway_start) > PAGE_SIZE);

        gateway_page += (address & ~PAGE_MASK);

        memcpy(gateway_page, &__user_gateway_start,
               &__user_gateway_end - &__user_gateway_start);

        /*
         * We don't need to flush the TLB here, there should be no mapping
         * present at boot for this address and only valid mappings are in
         * the TLB (apart from on Meta 1.x, but those cached invalid
         * mappings should be impossible to hit here).
         *
         * We don't flush the code cache here even though we have written
         * code through the data cache and they may not be coherent. At
         * this point we assume there is no stale data in the code cache
         * for this address so there is no need to flush.
         */
}

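/*
 * Allocate and initialise the pglist_data for node @nid and record
 * the PFN range it spans.
 */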
static void __init allocate_pgdat(unsigned int nid)
{
        unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
        unsigned long phys;
#endif

        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        phys = __memblock_alloc_base(sizeof(struct pglist_data),
                                     SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
        /* Retry with all of system memory */
        if (!phys)
                phys = __memblock_alloc_base(sizeof(struct pglist_data),
                                             SMP_CACHE_BYTES,
                                             memblock_end_of_DRAM());
        if (!phys)
                panic("Can't allocate pgdat for node %d\n", nid);

        NODE_DATA(nid) = __va(phys);
        memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

        NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

        NODE_DATA(nid)->node_start_pfn = start_pfn;
        NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

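/*
 * Set up the bootmem allocator for node @nid: allocate the bootmap,
 * register the node's active regions as free, and reserve the regions
 * already in use (node 0 only for now, excluding highmem).
 */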
static void __init bootmem_init_one_node(unsigned int nid)
{
        unsigned long total_pages, paddr;
        unsigned long end_pfn;
        struct pglist_data *p;

        p = NODE_DATA(nid);

        /* Nothing to do.. */
        if (!p->node_spanned_pages)
                return;

        end_pfn = pgdat_end_pfn(p);
#ifdef CONFIG_HIGHMEM
        if (end_pfn > max_low_pfn)
                end_pfn = max_low_pfn;
#endif

        total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn);

        paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
        if (!paddr)
                panic("Can't allocate bootmap for nid[%d]\n", nid);

        init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

        free_bootmem_with_active_regions(nid, end_pfn);

        /*
         * XXX Handle initial reservations for the system memory node
         * only for the moment, we'll refactor this later for handling
         * reservations in other nodes.
         */
        if (nid == 0) {
                struct memblock_region *reg;

                /* Reserve the sections we're already using. */
                for_each_memblock(reserved, reg) {
                        unsigned long size = reg->size;

#ifdef CONFIG_HIGHMEM
                        /* ...but not highmem */
                        if (PFN_DOWN(reg->base) >= highstart_pfn)
                                continue;

                        if (PFN_UP(reg->base + size) > highstart_pfn)
                                size = (highstart_pfn - PFN_DOWN(reg->base))
                                       << PAGE_SHIFT;
#endif

                        reserve_bootmem(reg->base, size, BOOTMEM_DEFAULT);
                }
        }

        sparse_memory_present_with_active_regions(nid);
}

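/*
 * Register all memblock memory regions with the node map, bring node 0
 * online, and initialise bootmem for every online node.
 */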
static void __init do_init_bootmem(void)
{
        struct memblock_region *reg;
        int i;

        /* Add active regions with valid PFNs. */
        for_each_memblock(memory, reg) {
                unsigned long start_pfn, end_pfn;
                start_pfn = memblock_region_memory_base_pfn(reg);
                end_pfn = memblock_region_memory_end_pfn(reg);
                memblock_set_node(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn),
                                  &memblock.memory, 0);
        }

        /* All of system RAM sits in node 0 for the non-NUMA case */
        allocate_pgdat(0);
        node_set_online(0);

        soc_mem_setup();

        for_each_online_node(i)
                bootmem_init_one_node(i);

        sparse_init();
}

extern char _heap_start[];

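/*
 * Register low memory with memblock and reserve everything from its
 * base up to the start of the kernel heap (the kernel image itself),
 * plus the highmem range where configured.
 */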
static void __init init_and_reserve_mem(void)
{
        unsigned long start_pfn, heap_start;
        u64 base = min_low_pfn << PAGE_SHIFT;
        u64 size = (max_low_pfn << PAGE_SHIFT) - base;

        heap_start = (unsigned long) &_heap_start;

        memblock_add(base, size);

        /*
         * Partially used pages are not usable - thus
         * we are rounding upwards:
         */
        start_pfn = PFN_UP(__pa(heap_start));

        /*
         * Reserve the kernel text.
         */
        memblock_reserve(base, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - base);

#ifdef CONFIG_HIGHMEM
        /*
         * Add & reserve highmem, so page structures are initialised.
         */
        base = highstart_pfn << PAGE_SHIFT;
        size = (highend_pfn << PAGE_SHIFT) - base;
        if (size) {
                memblock_add(base, size);
                memblock_reserve(base, size);
        }
#endif
}

#ifdef CONFIG_HIGHMEM
/*
 * Ensure we have allocated page tables in swapper_pg_dir for the
 * fixed mappings range from 'start' to 'end'.
 */
static void __init allocate_pgtables(unsigned long start, unsigned long end)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = pgd_index(vaddr);
        j = pmd_index(vaddr);
        pgd = swapper_pg_dir + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                pmd = (pmd_t *)pgd;
                for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
                        vaddr += PMD_SIZE;

                        if (!pmd_none(*pmd))
                                continue;

                        pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                        pmd_populate_kernel(&init_mm, pmd, pte);
                }
                j = 0;
        }
}

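/*
 * Allocate the page tables backing the fixmap and persistent kmap
 * ranges, and record the kmap pte table in pkmap_page_table.
 */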
static void __init fixedrange_init(void)
{
        unsigned long vaddr, end;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Fixed mappings:
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        allocate_pgtables(vaddr, end);

        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
        allocate_pgtables(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}
#endif /* CONFIG_HIGHMEM */

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/metag/kernel/setup.c.
 */
void __init paging_init(unsigned long mem_end)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        int nid;

        init_and_reserve_mem();

        memblock_allow_resize();

        memblock_dump_all();

        nodes_clear(node_online_map);

        init_new_context(&init_task, &init_mm);

        memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

        do_init_bootmem();
        mmu_init(mem_end);

#ifdef CONFIG_HIGHMEM
        fixedrange_init();
        kmap_init();
#endif

        /* Initialize the zero page to a bootmem page, already zeroed. */
        empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);

        user_gateway_init();

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long low, start_pfn;

                start_pfn = pgdat->bdata->node_min_pfn;
                low = pgdat->bdata->node_low_pfn;

                if (max_zone_pfns[ZONE_NORMAL] < low)
                        max_zone_pfns[ZONE_NORMAL] = low;

#ifdef CONFIG_HIGHMEM
                max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif
                pr_info("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
                        nid, start_pfn, low);
        }

        free_area_init_nodes(max_zone_pfns);
}

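/*
 * Release all bootmem to the buddy allocator (freeing highmem pages
 * first where configured) and print the memory info banner.
 */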
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long tmp;

        /*
         * Explicitly reset zone->managed_pages because highmem pages are
         * freed before calling free_all_bootmem();
         */
        reset_all_zones_managed_pages();
        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
                free_highmem_page(pfn_to_page(tmp));
#endif /* CONFIG_HIGHMEM */

        free_all_bootmem();
        mem_init_print_info(NULL);
}

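/* Free and poison the memory occupied by __init code and data. */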
void free_initmem(void)
{
        free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
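/* Free and poison the memory used by the initial ramdisk image. */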
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
                           "initrd");
}
#endif