// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 1995-2005 Russell King
// Copyright (C) 2012 ARM Ltd.
// Copyright (C) 2013-2017 Andes Technology Corporation

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/page.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
DEFINE_SPINLOCK(anon_alias_lock);
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern unsigned long phys_initrd_start;
extern unsigned long phys_initrd_size;

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
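
/*
 * Report the zone sizes to the core mm: ZONE_NORMAL covers lowmem (up to
 * max_low_pfn), and ZONE_HIGHMEM is only populated when CONFIG_HIGHMEM is
 * enabled.
 */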
static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];

	/* Clear the zone sizes */
	memset(zones_size, 0, sizeof(zones_size));

	zones_size[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = max_pfn;
#endif
	free_area_init(zones_size);
}

/*
 * Map all physical memory under high_memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
	unsigned long v, p, e;
	pgd_t *pge;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	/* These mark extents of read-only kernel pages...
	 * ...from vmlinux.lds.S
	 */

	p = (u32) memblock_start_of_DRAM() & PAGE_MASK;
	e = min((u32) memblock_end_of_DRAM(), (u32) __pa(high_memory));

	v = (u32) __va(p);
	pge = pgd_offset_k(v);

	while (p < e) {
		int j;

		pue = pud_offset(pge, v);
		pme = pmd_offset(pue, v);

		if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
			panic("%s: Kernel hardcoded for "
			      "two-level page tables", __func__);
		}

		/* Allocate one page for holding the PTEs... */
		pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
		memset(pte, 0, PAGE_SIZE);
		set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));

		/* ...and fill the newly allocated page with PTEs. */
		for (j = 0; p < e && j < PTRS_PER_PTE;
		     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
			/* Create the mapping between p and v. */
			/* TODO: finer-grained page access permissions */
			set_pte(pte, __pte(p + pgprot_val(PAGE_KERNEL)));
		}

		pge++;
	}
}
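
/*
 * fixmap_pmd_p points at the page-table page that backs the fixmap region.
 * fixedrange_init() allocates it (and, with CONFIG_HIGHMEM, the page that
 * backs the permanent kmap window) so that later __set_fixmap() and kmap()
 * calls only have to install individual PTEs.
 */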
static pmd_t *fixmap_pmd_p;

static void __init fixedrange_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
#ifdef CONFIG_HIGHMEM
	pte_t *pte;
#endif /* CONFIG_HIGHMEM */

	/*
	 * Fixed mappings:
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	fixmap_pmd_p = (pmd_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
	memset(fixmap_pmd_p, 0, PAGE_SIZE);
	set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE));

#ifdef CONFIG_HIGHMEM
	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
	memset(pte, 0, PAGE_SIZE);
	set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
	pkmap_page_table = pte;
#endif /* CONFIG_HIGHMEM */
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(void)
{
	int i;
	void *zero_page;

	pr_info("Setting up paging and PTEs.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */
	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(1);

	map_ram();

	fixedrange_init();

	/* allocate space for empty_zero_page */
	zero_page = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
	memset(zero_page, 0, PAGE_SIZE);

	zone_sizes_init();

	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}
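
/*
 * With CONFIG_HIGHMEM, give every page above lowmem that memblock has not
 * reserved back to the page allocator; without highmem this is a no-op.
 */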
static inline void __init free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long pfn;

	for (pfn = PFN_UP(__pa(high_memory)); pfn < max_pfn; pfn++) {
		phys_addr_t paddr = (phys_addr_t) pfn << PAGE_SHIFT;

		if (!memblock_is_reserved(paddr))
			free_highmem_page(pfn_to_page(pfn));
	}
#endif
}

static void __init set_max_mapnr_init(void)
{
	max_mapnr = max_pfn;
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	phys_addr_t memory_start = memblock_start_of_DRAM();

	BUG_ON(!mem_map);
	set_max_mapnr_init();

	free_highmem();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	mem_init_print_info(NULL);

	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    consist : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
		(LAST_PKMAP * PAGE_SIZE) >> 10,
#endif
		CONSISTENT_BASE, CONSISTENT_END,
		((CONSISTENT_END) - (CONSISTENT_BASE)) >> 20, VMALLOC_START,
		(unsigned long)VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)__va(memory_start), (unsigned long)high_memory,
		((unsigned long)high_memory -
		 (unsigned long)__va(memory_start)) >> 20,
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10, (unsigned long)&_etext,
		(unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START);
	BUILD_BUG_ON((CONSISTENT_END) > PKMAP_BASE);
#endif
	BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START);
	BUG_ON(CONSISTENT_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUG_ON(VMALLOC_START >= VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	return;
}
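
/*
 * Once boot is finished, the __init text/data and (if present) the initrd
 * image are no longer needed; the helpers below return those ranges to the
 * page allocator.
 */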
void free_initmem(void)
{
	free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
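
/*
 * __set_fixmap() installs or tears down a single fixmap entry in the PTE
 * page set up by fixedrange_init(): a non-empty pgprot installs the
 * mapping, an empty one clears the PTE and flushes the TLB for that slot.
 */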
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = (pte_t *)&fixmap_pmd_p[pte_index(addr)];

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}