arch/nds32/mm/init.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 1995-2005 Russell King
// Copyright (C) 2012 ARM Ltd.
// Copyright (C) 2013-2017 Andes Technology Corporation

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>

#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/page.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
DEFINE_SPINLOCK(anon_alias_lock);
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
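
/*
 * Note: despite its name, zones_size[] below holds the highest PFN
 * covered by each zone, which is what free_area_init() consumes.
 */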
static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];

	/* Clear the zone sizes */
	memset(zones_size, 0, sizeof(zones_size));

	zones_size[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = max_pfn;
#endif
	free_area_init(zones_size);
}

/*
 * Map all physical memory under high_memory into the kernel's address
 * space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
	unsigned long v, p, e;
	pgd_t *pge;
	p4d_t *p4e;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	/* These mark extents of read-only kernel pages...
	 * ...from vmlinux.lds.S
	 */

	p = (u32) memblock_start_of_DRAM() & PAGE_MASK;
	e = min((u32) memblock_end_of_DRAM(), (u32) __pa(high_memory));

	v = (u32) __va(p);
	pge = pgd_offset_k(v);

	while (p < e) {
		int j;
		p4e = p4d_offset(pge, v);
		pue = pud_offset(p4e, v);
		pme = pmd_offset(pue, v);

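		/*
		 * With the upper levels folded, p4d/pud/pmd_offset()
		 * must all hand back the pgd slot itself; anything else
		 * means a real intermediate level that this walker
		 * cannot handle.
		 */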
		if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
			panic("%s: Kernel hardcoded for "
			      "two-level page tables", __func__);
		}

		/* Alloc one page for holding PTEs... */
		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
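		/* Hook the new PTE table into the (folded) pmd entry. */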
		set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));

		/* Fill the newly allocated page with PTEs */
		for (j = 0; p < e && j < PTRS_PER_PTE;
		     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
			/* Create a mapping between p and v. */
			/* TODO: finer-grained page access permissions */
			set_pte(pte, __pte(p + pgprot_val(PAGE_KERNEL)));
		}

		pge++;
	}
}
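
/*
 * PTE table backing the fixmap range; __set_fixmap() installs
 * individual entries into it via pte_index().
 */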
static pmd_t *fixmap_pmd_p;

static void __init fixedrange_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
#ifdef CONFIG_HIGHMEM
	pte_t *pte;
#endif /* CONFIG_HIGHMEM */

	/*
	 * Fixed mappings:
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
	pgd = swapper_pg_dir + pgd_index(vaddr);
	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);
	pmd = pmd_offset(pud, vaddr);
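	/*
	 * One page of PTEs is assumed to cover the entire fixmap range,
	 * i.e. the range is expected to fit within a single pmd.
	 */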
	fixmap_pmd_p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!fixmap_pmd_p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);
	set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE));

#ifdef CONFIG_HIGHMEM
	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);
	set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
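	/* kmap_high() installs its permanent mappings into this table. */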
	pkmap_page_table = pte;
#endif /* CONFIG_HIGHMEM */
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(void)
{
	int i;
	void *zero_page;

	pr_info("Setting up paging and PTEs.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */
	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(1);

	map_ram();

	fixedrange_init();

	/* allocate space for empty_zero_page */
	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);
	zone_sizes_init();
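
	/*
	 * memblock_alloc() returns zeroed memory, so the zero page needs
	 * no explicit clearing.
	 */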
	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}
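
/*
 * Hand every non-reserved highmem page straight to the buddy
 * allocator; lowmem is released later via memblock_free_all().
 */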
static inline void __init free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long pfn;

	for (pfn = PFN_UP(__pa(high_memory)); pfn < max_pfn; pfn++) {
		phys_addr_t paddr = (phys_addr_t) pfn << PAGE_SHIFT;

		if (!memblock_is_reserved(paddr))
			free_highmem_page(pfn_to_page(pfn));
	}
#endif
}
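
/* max_mapnr must cover highmem as well, hence max_pfn, not max_low_pfn. */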
static void __init set_max_mapnr_init(void)
{
	max_mapnr = max_pfn;
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free. This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	phys_addr_t memory_start = memblock_start_of_DRAM();

	BUG_ON(!mem_map);
	set_max_mapnr_init();

	free_highmem();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	mem_init_print_info(NULL);
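
	/* Dump the kernel virtual memory layout, highest range first. */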
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    consist : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
		(LAST_PKMAP * PAGE_SIZE) >> 10,
#endif
		CONSISTENT_BASE, CONSISTENT_END,
		((CONSISTENT_END) - (CONSISTENT_BASE)) >> 20, VMALLOC_START,
		(unsigned long)VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)__va(memory_start), (unsigned long)high_memory,
		((unsigned long)high_memory -
		 (unsigned long)__va(memory_start)) >> 20,
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10, (unsigned long)&_etext,
		(unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START);
	BUILD_BUG_ON((CONSISTENT_END) > PKMAP_BASE);
#endif
	BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
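
	/*
	 * ... and again at run time: high_memory in particular is only
	 * known once memory has been set up.
	 */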
#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START);
	BUG_ON(CONSISTENT_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUG_ON(VMALLOC_START >= VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	return;
}

void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = (pte_t *)&fixmap_pmd_p[pte_index(addr)];

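	/* Non-empty flags install a mapping; empty flags tear it down. */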
	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}