Linux 6.13-rc4, arch/sh/mm/init.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2011  Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/sizes.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include "ioremap.h"

pgd_t swapper_pg_dir[PTRS_PER_PGD];
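
/*
 * Register the board's entire RAM range with memblock. This is the
 * generic fallback for platforms that need no special memory setup.
 */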
void __init generic_mem_init(void)
{
        memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
        /* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
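/*
 * Walk the kernel page tables for @addr, allocating any missing
 * intermediate (p4d/pud/pmd) levels along the way, and return a
 * pointer to the PTE slot. Returns NULL when the top-level entry is
 * empty or an intermediate allocation fails.
 */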
static pte_t *__get_pte_phys(unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
                pgd_ERROR(*pgd);
                return NULL;
        }

        p4d = p4d_alloc(NULL, pgd, addr);
        if (unlikely(!p4d)) {
                p4d_ERROR(*p4d);
                return NULL;
        }

        pud = pud_alloc(NULL, p4d, addr);
        if (unlikely(!pud)) {
                pud_ERROR(*pud);
                return NULL;
        }

        pmd = pmd_alloc(NULL, pud, addr);
        if (unlikely(!pmd)) {
                pmd_ERROR(*pmd);
                return NULL;
        }

        return pte_offset_kernel(pmd, addr);
}
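
/*
 * Install a kernel mapping of @phys at @addr with protection @prot and
 * flush the stale TLB entry. _PAGE_WIRED mappings are additionally
 * pinned into the TLB so they survive TLB replacement.
 */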
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
        pte_t *pte;

        pte = __get_pte_phys(addr);
        if (!pte)       /* walk failed; __get_pte_phys() already reported it */
                return;
        if (!pte_none(*pte)) {
                pte_ERROR(*pte);
                return;
        }

        set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
        local_flush_tlb_one(get_asid(), addr);

        if (pgprot_val(prot) & _PAGE_WIRED)
                tlb_wire_entry(NULL, addr, *pte);
}
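
/*
 * Tear down a mapping installed by set_pte_phys(), unwiring the TLB
 * entry first when the mapping was wired.
 */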
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
        pte_t *pte;

        pte = __get_pte_phys(addr);
        if (!pte)       /* walk failed; nothing to clear */
                return;

        if (pgprot_val(prot) & _PAGE_WIRED)
                tlb_unwire_entry();

        set_pte(pte, pfn_pte(0, __pgprot(0)));
        local_flush_tlb_one(get_asid(), addr);
}
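
/*
 * Fixmap slots are compile-time-allocated virtual addresses, so
 * mapping or unmapping one is just a matter of filling in (or
 * clearing) its PTE.
 */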
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        clear_pte_phys(address, prot);
}
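
/*
 * Ensure @pud has a pmd table, allocating one from memblock on first
 * use, and return a pointer to the table's first entry.
 */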
static pmd_t * __init one_md_table_init(pud_t *pud)
{
        if (pud_none(*pud)) {
                pmd_t *pmd;

                pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                if (!pmd)
                        panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                              __func__, PAGE_SIZE, PAGE_SIZE);
                pud_populate(&init_mm, pud, pmd);
                BUG_ON(pmd != pmd_offset(pud, 0));
        }

        return pmd_offset(pud, 0);
}
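
/*
 * Likewise for the PTE level: ensure @pmd points at a page table and
 * return that table's first entry.
 */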
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *pte;

                pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                if (!pte)
                        panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                              __func__, PAGE_SIZE, PAGE_SIZE);
                pmd_populate_kernel(&init_mm, pmd, pte);
                BUG_ON(pte != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                            unsigned long vaddr, pte_t *lastpte)
{
        /* No kmap fixups are needed here; just pass the pte through. */
        return pte;
}
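
/*
 * Pre-allocate all intermediate page tables needed to cover
 * [start, end) under @pgd_base. Only the tables themselves are
 * created; the individual PTEs are filled in later, e.g. by
 * __set_fixmap().
 */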
void __init page_table_range_init(unsigned long start, unsigned long end,
                                  pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = pgd_index(vaddr);
        j = pud_index(vaddr);
        k = pmd_index(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
                        pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
                        pmd += k;
#endif
                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
                                pte = page_table_kmap_check(one_page_table_init(pmd),
                                                            pmd, vaddr, pte);
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
}
#endif  /* CONFIG_MMU */
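
/*
 * Record the PFN span of node @nid in its pg_data_t, allocating the
 * node data structure first when NUMA is enabled.
 */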
void __init allocate_pgdat(unsigned int nid)
{
        unsigned long start_pfn, end_pfn;

        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NUMA
        alloc_node_data(nid);
#endif

        NODE_DATA(nid)->node_start_pfn = start_pfn;
        NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
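
/*
 * Register every memblock range as an active region, bring node 0
 * online, give the platform its setup hook, and initialize the
 * sparse memory model.
 */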
static void __init do_init_bootmem(void)
{
        unsigned long start_pfn, end_pfn;
        int i;

        /* Add active regions with valid PFNs. */
        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
                __add_active_range(0, start_pfn, end_pfn);

        /* All of system RAM sits in node 0 for the non-NUMA case */
        allocate_pgdat(0);
        node_set_online(0);

        plat_mem_setup();

        sparse_init();
}
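
/*
 * Reserve the memory that must never reach the page allocator: the
 * kernel image itself, everything below CONFIG_ZERO_PAGE_OFFSET, and
 * any initrd/crashkernel regions.
 */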
static void __init early_reserve_mem(void)
{
        unsigned long start_pfn;
        u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
        u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

        /*
         * Partially used pages are not usable - thus
         * we are rounding upwards:
         */
        start_pfn = PFN_UP(__pa(_end));

        /*
         * Reserve the kernel image, from the zero page offset up to the
         * page-rounded end of the kernel (_end), so that early
         * allocations can never land inside it.
         */
        memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

        /*
         * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
         */
        if (CONFIG_ZERO_PAGE_OFFSET != 0)
                memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

        /*
         * Handle additional early reservations
         */
        check_for_initrd();
        reserve_crashkernel();
}
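
/*
 * Boot-time MM bring-up: size and register memory, apply the early
 * reservations, build the initial page tables, and hand the zone
 * layout to the core mm.
 */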
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long vaddr, end;

        sh_mv.mv_mem_init();

        early_reserve_mem();

        /*
         * Once the early reservations are out of the way, give the
         * platforms a chance to kick out some memory.
         */
        if (sh_mv.mv_mem_reserve)
                sh_mv.mv_mem_reserve();

        memblock_enforce_memory_limit(memory_limit);
        memblock_allow_resize();

        memblock_dump_all();

        /*
         * Determine low and high memory ranges:
         */
        max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
        set_max_mapnr(max_low_pfn - min_low_pfn);

        nodes_clear(node_online_map);

        memory_start = (unsigned long)__va(__MEMORY_START);
        memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

        uncached_init();
        pmb_init();
        do_init_bootmem();
        ioremap_fixed_init();

        /*
         * We don't need to map the kernel through the TLB, as it is
         * permanently mapped using P1. So clear the entire pgd.
         */
        memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

        /*
         * Set an initial value for the MMU.TTB so we don't have to
         * check for a null value.
         */
        set_TTB(swapper_pg_dir);

        /*
         * Populate the relevant portions of swapper_pg_dir so that
         * we can use the fixmap entries without calling kmalloc.
         * PTEs will be filled in by __set_fixmap().
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, swapper_pg_dir);

        kmap_coherent_init();

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init(max_zone_pfns);
}

unsigned int mem_init_done = 0;
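
/*
 * Release memblock-managed memory to the buddy allocator, initialize
 * the zero page, and report the virtual memory layout.
 */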
void __init mem_init(void)
{
        pg_data_t *pgdat;

        high_memory = NULL;
        for_each_online_pgdat(pgdat)
                high_memory = max_t(void *, high_memory,
                                    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

        memblock_free_all();

        /* Set this up early, so we can take care of the zero page */
        cpu_cache_init();

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);
        __flush_wback_region(empty_zero_page, PAGE_SIZE);

        vsyscall_init();

        pr_info("virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
                "            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

                (unsigned long)VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)memory_start, (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
                uncached_start, uncached_end, uncached_size >> 20,
#endif

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        mem_init_done = 1;
}