/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2011  Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <linux/sizes.h>
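
/*
 * The kernel's master page directory (used by init_mm); it is cleared
 * and installed in the MMU's TTB register in paging_init() below.
 */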
pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
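/*
 * Walk the kernel page tables down to the pte that maps 'addr',
 * allocating the intermediate pud/pmd tables on demand. This backs
 * the fixmap helpers below.
 */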
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}
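
/*
 * Install a pte for a single kernel virtual address and flush any stale
 * TLB entry; _PAGE_WIRED mappings are additionally pinned into the TLB.
 */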
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}
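
/*
 * Map/unmap a compile-time fixed virtual address. A minimal usage
 * sketch (the exact fixmap index depends on the platform's fixmap.h):
 *
 *	__set_fixmap(FIX_IOREMAP_BEGIN, phys, PAGE_KERNEL);
 *
 * Out-of-range indices are a hard bug.
 */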
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}
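
/*
 * Boot-time page table constructors: each missing pmd/pte table is a
 * single page pulled from memblock and hooked into init_mm.
 */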
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}
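
/* Nothing to fix up on sh; the hook hands the pte straight back. */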
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}
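
/*
 * Pre-populate the pgd/pud/pmd levels covering [start, end) so that
 * fixmap entries can later be set without allocating; the individual
 * ptes are filled in by __set_fixmap(). Stepping is in PMD_SIZE units.
 */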
void __init page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */
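
/*
 * Set up the node's pglist_data. Without CONFIG_NEED_MULTIPLE_NODES the
 * pgdat is statically allocated, so only the pfn span needs filling in.
 */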
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	NODE_DATA(nid) = memblock_alloc_try_nid(
				sizeof(struct pglist_data),
				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!NODE_DATA(nid))
		panic("Can't allocate pgdat for node %d\n", nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
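
/*
 * Feed each memblock memory region into the arch's active-range
 * bookkeeping, bring node 0 online, then tell sparsemem which
 * sections are present.
 */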
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;

		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
	}

	sparse_init();
}
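
/*
 * Carve out the regions that must never reach the page allocator: the
 * kernel image, anything below CONFIG_ZERO_PAGE_OFFSET, the initrd and
 * the crashkernel window.
 */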
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
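
/* Set once mem_init() has run; other early-boot code can key off this. */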
unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
		(LAST_PKMAP * PAGE_SIZE) >> 10,
#endif
		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,
		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#ifdef CONFIG_MEMORY_HOTREMOVE
void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;

	zone = page_zone(pfn_to_page(start_pfn));
	__remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */