arch/s390/mm/init.c

// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
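
/*
 * swapper_pg_dir is the initial top-level translation table for the
 * kernel address space; crst_table_init() in paging_init() below fills
 * it with empty region entries before the real mappings are created.
 */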
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);
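
/*
 * s390 provides several virtually colored empty zero pages so that
 * concurrent readers do not all hammer the same cache lines.
 * zero_page_mask selects one of them by virtual address; the s390
 * ZERO_PAGE() macro does (roughly):
 *
 *	virt_to_page(empty_zero_page +
 *		     ((unsigned long)(vaddr) & zero_page_mask))
 */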
static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow(init_mm.pgd);

	/* enable virtual mapping in kernel mode */
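	/*
	 * DAT resolves addresses through the address-space-control
	 * elements in control registers 1 (primary), 7 (secondary) and
	 * 13 (home); load the kernel ASCE into all three, then switch
	 * the PSW to home space with the DAT bit set.
	 */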
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();
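
	/*
	 * Tell the sparse memory model which ranges are present and set
	 * up the zones: ZONE_DMA covers memory reachable by 31-bit DMA
	 * (below 2 GB), everything above goes to ZONE_NORMAL.
	 */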
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
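
/*
 * Called once kernel init is complete: write-protect the
 * __ro_after_init section, which stayed writable so that init code
 * could still fill it.
 */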
void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}
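
/*
 * mem_init() hands all bootmem over to the buddy allocator, allocates
 * the empty zero pages and initializes the CMMA page states that are
 * used for guest page hinting when running under a hypervisor.
 */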
void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages. */

	cmma_init_nodat();

	mem_init_print_info(NULL);
}

void free_initmem(void)
{
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}
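
/*
 * Memory hotplug on s390 works on standby memory: increments reported
 * by sclp can be assigned to or unassigned from the configuration,
 * but there is no hardware-initiated removal of a range.
 */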
#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}
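
/*
 * On MEM_GOING_OFFLINE, walk all CMA areas and veto the offline
 * request with -EBUSY if any area overlaps the affected range, since
 * CMA allocations must remain available from their reserved range.
 */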
static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */
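
/*
 * Adding memory first creates the identity mapping and struct pages
 * for the new range via vmem_add_mapping(), then hands the pages to
 * the core mm; the mapping is torn down again if __add_pages() fails.
 */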
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		    bool want_memblock)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, altmap, want_memblock);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	/*
	 * There is no hardware or firmware interface which could trigger a
	 * hot memory remove on s390. So there is nothing that needs to be
	 * implemented.
	 */
	return -EBUSY;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */