// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/ctlreg.h>
#include <asm/kfence.h>
#include <asm/dma.h>
#include <asm/abs_lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio_config.h>
#include <linux/execmem.h>
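
/*
 * swapper_pg_dir is the top-level page table of the kernel address
 * space. invalid_pg_dir backs s390_invalid_asce, an address space
 * control element that translates no addresses, so that stray accesses
 * through a stale ASCE fault instead of hitting real memory.
 */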
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

struct ctlreg __bootdata_preserved(s390_invalid_asce);

unsigned long __bootdata_preserved(page_noexec_mask);
EXPORT_SYMBOL(page_noexec_mask);

unsigned long __bootdata_preserved(segment_noexec_mask);
EXPORT_SYMBOL(segment_noexec_mask);

unsigned long __bootdata_preserved(region_noexec_mask);
EXPORT_SYMBOL(region_noexec_mask);

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);
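
/*
 * Allocate a block of 2^order zeroed pages and let the ZERO_PAGE()
 * macro (see arch/s390/include/asm/page.h) pick one of them based on
 * the faulting virtual address, spreading read-only zero-page mappings
 * across cache colors. Called from mem_init() after the buddy
 * allocator is up, since the allocation uses __get_free_pages().
 */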
static void __init setup_zero_pages(void)
{
	unsigned long total_pages = memblock_estimated_nr_free_pages();
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (total_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	vmem_map_init();
	sparse_init();
	zone_dma_limit = DMA_BIT_MASK(31);
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}
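
/*
 * Write-protect the kernel's __ro_after_init data and, where the
 * instruction-execution-protection facility is available, enable it
 * in control register 0 so that non-executable mappings are enforced.
 */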
void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	if (MACHINE_HAS_NX)
		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
	__set_memory_ro(__start_ro_after_init, __end_ro_after_init);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}
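
/*
 * Under protected virtualization (IBM Secure Execution) guest memory
 * is inaccessible to the hypervisor unless explicitly shared via
 * Ultravisor calls: set_memory_decrypted() shares pages (used for
 * swiotlb/DMA buffers), set_memory_encrypted() makes them secure-only
 * again.
 */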
int set_memory_encrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);

	/* make sure bounce buffers are shared */
	swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
	swiotlb_update_mem_attributes();
}

void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();
	kfence_split_mapping();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}
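
/*
 * Per-cpu area setup: s390 uses the generic "embed" first-chunk
 * allocator. All possible CPUs are treated as equidistant and mapped
 * to node 0, so no NUMA awareness is needed here.
 */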
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return LOCAL_DISTANCE;
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return 0;
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};
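
/*
 * cma_for_each_area() callback: return -EBUSY if the given CMA area
 * overlaps the pfn range described by @data, which makes the notifier
 * below reject the offline request.
 */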
static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */
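
/*
 * arch_add_memory() first creates the identity (vmem) mapping for the
 * new range and only then hands the pages to the core mm; only
 * PAGE_KERNEL protections are supported for hotplugged memory.
 */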
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
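
/*
 * Describe where the execmem allocator (used for modules, BPF, etc.)
 * may place executable memory: a single EXECMEM_DEFAULT range in the
 * module area, with its start randomized when KASLR is enabled.
 */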
#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

struct execmem_info __init *execmem_arch_setup(void)
{
	unsigned long module_load_offset = 0;
	unsigned long start;

	if (kaslr_enabled())
		module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;

	start = MODULES_VADDR + module_load_offset;

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.flags		= EXECMEM_KASAN_SHADOW,
				.start		= start,
				.end		= MODULES_END,
				.pgprot		= PAGE_KERNEL,
				.alignment	= MODULE_ALIGN,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */