arch/s390/kernel/setup.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-contiguous.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>

#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/uv.h>
#include "entry.h"
/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

unsigned long elf_hwcap __read_mostly = 0;
char elf_platform[ELF_PLATFORM_SIZE];

unsigned long int_hwcap = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

int __bootdata(noexec_disabled);
int __bootdata(memory_end_set);
unsigned long __bootdata(memory_end);
unsigned long __bootdata(vmalloc_size);
unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);

struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
unsigned long __bootdata_preserved(__swsusp_reset_dma);
unsigned long __bootdata_preserved(__stext_dma);
unsigned long __bootdata_preserved(__etext_dma);
unsigned long __bootdata_preserved(__sdma);
unsigned long __bootdata_preserved(__edma);
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);

unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * This is set up by the setup-routine at boot-time
 * for S390 need to find out, what we have to setup
 * using address 0x10400 ...
 */

#include <asm/setup.h>
/*
 * condev= and conmode= setup parameter.
 */

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);
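
/*
 * Register the preferred console device matching the console mode
 * selected above (3215/SCLP line mode, 3270, VT220 or HVC).
 */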
static void __init set_preferred_console(void)
{
	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
	else if (CONSOLE_IS_VT220)
		add_preferred_console("ttyS", 1, NULL);
	else if (CONSOLE_IS_HVC)
		add_preferred_console("hvc", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (!strcmp(str, "3215"))
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (!strcmp(str, "3270"))
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);
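
/*
 * Determine the default console mode from the environment: on z/VM
 * query CP for the console device and terminal mode, under KVM use
 * the SCLP capabilities, otherwise fall back to the SCLP console.
 */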
static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (str_has_prefix(ptr + 8, "3270")) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (str_has_prefix(ptr + 8, "3215")) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_KVM) {
		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
			SET_CONSOLE_VT220;
		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
			SET_CONSOLE_SCLP;
		else
			SET_CONSOLE_HVC;
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}
#ifdef CONFIG_CRASH_DUMP
static void __init setup_zfcpdump(void)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (OLDMEM_BASE)
		return;
	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}
/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void *restart_stack __section(.data);
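
/*
 * Allocate/free a THREAD_SIZE kernel stack: from the vmalloc area if
 * CONFIG_VMAP_STACK is enabled, otherwise from the page allocator.
 */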
unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
	return (unsigned long)
		__vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP,
				     PAGE_KERNEL, 0, NUMA_NO_NODE,
				     __builtin_return_address(0));
#else
	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}

void stack_free(unsigned long stack)
{
#ifdef CONFIG_VMAP_STACK
	vfree((void *) stack);
#else
	free_pages(stack, THREAD_SIZE_ORDER);
#endif
}
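
/*
 * Allocate the initial async (interrupt) stack for the boot CPU
 * directly from the page allocator. The early initcall
 * async_stack_realloc() below replaces it with a stack_alloc() stack
 * once that is possible.
 */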
int __init arch_early_irq_init(void)
{
	unsigned long stack;

	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	if (!stack)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
	return 0;
}
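
/*
 * Replace the early async stack with one obtained from stack_alloc()
 * and free the bootstrap pages.
 */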
static int __init async_stack_realloc(void)
{
	unsigned long old, new;

	old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
	new = stack_alloc();
	if (!new)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
	free_pages(old, THREAD_SIZE_ORDER);
	return 0;
}
early_initcall(async_stack_realloc);
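
/*
 * Switch the boot CPU to a newly allocated kernel stack and continue
 * with rest_init() on it.
 */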
void __init arch_call_rest_init(void)
{
	unsigned long stack;

	stack = stack_alloc();
	if (!stack)
		panic("Couldn't allocate kernel stack");
	current->stack = (void *) stack;
#ifdef CONFIG_VMAP_STACK
	current->stack_vm_area = (void *) stack;
#endif
	set_task_stack_end_magic(current);
	stack += STACK_INIT_OFFSET;
	S390_lowcore.kernel_stack = stack;
	CALL_ON_STACK_NORETURN(rest_init, stack);
}
static void __init setup_lowcore_dat_off(void)
{
	struct lowcore *lc;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
	if (!lc)
		panic("%s: Failed to allocate %zu bytes align=%zx\n",
		      __func__, sizeof(*lc), sizeof(*lc));

	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr = (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = (unsigned long) system_call;
	lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = clock_comparator_max;
	lc->nodat_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long)&init_task;
	lc->lpp = LPP_MAGIC;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->preempt_count = S390_lowcore.preempt_count;
	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(lc->stfle_fac_list));
	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
	       sizeof(lc->alt_stfle_fac_list));
	nmi_alloc_boot_cpu(lc);
	vdso_alloc_boot_cpu(lc);
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
	lc->async_enter_timer = S390_lowcore.async_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	/*
	 * Allocate the global restart stack which is the same for
	 * all CPUs in case *one* of them does a PSW restart.
	 */
	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!restart_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	restart_stack += STACK_INIT_OFFSET;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1UL;

	/* Setup absolute zero lowcore */
	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
	arch_spin_lock_setup(0);
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */

	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}
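
/*
 * Enable DAT in the interrupt PSWs once paging_init() has created the
 * kernel page tables.
 */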
static void __init setup_lowcore_dat_on(void)
{
	__ctl_clear_bit(0, 28);
	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
	__ctl_set_bit(0, 28);
}

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};
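
/*
 * Register the "System RAM" regions and the kernel code/data/bss
 * resources defined above in the iomem resource tree.
 */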
static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	struct memblock_region *reg;
	int j;

	code_resource.start = (unsigned long) _text;
	code_resource.end = (unsigned long) _etext - 1;
	data_resource.start = (unsigned long) _etext;
	data_resource.end = (unsigned long) _edata - 1;
	bss_resource.start = (unsigned long) __bss_start;
	bss_resource.end = (unsigned long) __bss_stop - 1;

	for_each_memblock(memory, reg) {
		res = memblock_alloc(sizeof(*res), 8);
		if (!res)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*res), 8);
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		res->name = "System RAM";
		res->start = reg->base;
		res->end = reg->base + reg->size - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = memblock_alloc(sizeof(*sub_res), 8);
				if (!sub_res)
					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
					      __func__, sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Re-add removed crash kernel memory as reserved memory. This makes
	 * sure it will be mapped with the identity mapping and struct pages
	 * will be created, so it can be resized later on.
	 * However add it later since the crash kernel resource should not be
	 * part of the System RAM resource.
	 */
	if (crashk_res.end) {
		memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
		insert_resource(&iomem_resource, &crashk_res);
	}
#endif
}
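
/*
 * Compute the kernel virtual address space layout: the module area
 * sits at the top, the vmalloc area below it, then the vmemmap array,
 * with the remaining space used for the 1:1 mapping of physical
 * memory. memory_end is clamped so that everything fits.
 */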
static void __init setup_memory_end(void)
{
	unsigned long vmax, tmp;

	/* Choose kernel address space layout: 3 or 4 levels. */
	if (IS_ENABLED(CONFIG_KASAN)) {
		vmax = IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)
			   ? _REGION1_SIZE
			   : _REGION2_SIZE;
	} else {
		tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
		tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
		if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
			vmax = _REGION2_SIZE; /* 3-level kernel page table */
		else
			vmax = _REGION1_SIZE; /* 4-level kernel page table */
	}

	/* module area is at the end of the kernel address space. */
	MODULES_END = vmax;
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	tmp = SECTION_ALIGN_UP(tmp);
	tmp = VMALLOC_START - tmp * sizeof(struct page);
	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
	vmemmap = (struct page *) tmp;

	/* Take care that memory_end is set and <= vmemmap */
	memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap);
#ifdef CONFIG_KASAN
	/* fit in kasan shadow memory region between 1:1 and vmemmap */
	memory_end = min(memory_end, KASAN_SHADOW_START);
	vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
#endif
	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
	memblock_remove(memory_end, ULONG_MAX);

	pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
}
#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from
 * the area [0 - crashkernel memory size] and
 * [crashk_res.start - crashk_res.end] is set offline.
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
		return NOTIFY_OK;
	if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
		return NOTIFY_OK;
	return NOTIFY_BAD;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Make sure that the area behind memory_end is protected
 */
static void reserve_memory_end(void)
{
	if (memory_end_set)
		memblock_reserve(memory_end, ULONG_MAX);
}

/*
 * Make sure that oldmem, where the dump is stored, is protected
 */
static void reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Forget all memory above the running kdump system */
		memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}
/*
 * Remove oldmem, where the dump is stored, from the memblock lists
 */
static void remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Forget all memory above the running kdump system */
		memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}
/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
			       &crash_base);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	low = crash_base ?: OLDMEM_BASE;
	high = low + crash_size;
	if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;

		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_find_in_range(low, high, crash_size,
						    KEXEC_CRASH_MEM_ALIGN);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	if (register_memory_notifier(&kdump_mem_nb))
		return;

	if (!OLDMEM_BASE && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}
/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!INITRD_START || !INITRD_SIZE)
		return;
	initrd_start = INITRD_START;
	initrd_end = initrd_start + INITRD_SIZE;
	memblock_reserve(INITRD_START, INITRD_SIZE);
#endif
}

/*
 * Reserve the memory area used to pass the certificate lists
 */
static void __init reserve_certificate_list(void)
{
	if (ipl_cert_list_addr)
		memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}
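
/*
 * Reserve the area holding the boot-time memory detection information
 * (mem_detect); it is freed again by free_mem_detect_info() once the
 * memblock lists have been populated.
 */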
static void __init reserve_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_reserve(start, size);
}

static void __init free_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_free(start, size);
}
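
/* Return a human readable name for the memory detection method used. */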
static const char * __init get_mem_info_source(void)
{
	switch (mem_detect.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	return "none";
}
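
/*
 * Add all detected memory to memblock. Bottom-up mode is enabled
 * temporarily so that early memblock allocations stay close to the
 * kernel image.
 */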
static void __init memblock_add_mem_detect_info(void)
{
	unsigned long start, end;
	int i;

	memblock_dbg("physmem info source: %s (%hhd)\n",
		     get_mem_info_source(), mem_detect.info_source);
	/* keep memblock lists close to the kernel */
	memblock_set_bottom_up(true);
	for_each_mem_detect_block(i, &start, &end) {
		memblock_add(start, end - start);
		memblock_physmem_add(start, end - start);
	}
	memblock_set_bottom_up(false);
	memblock_dump_all();
}

/*
 * Check for initrd being in usable memory
 */
static void __init check_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE &&
	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
		pr_err("The initial RAM disk does not fit into the memory\n");
		memblock_free(INITRD_START, INITRD_SIZE);
		initrd_start = initrd_end = 0;
	}
#endif
}

/*
 * Reserve memory used for lowcore/command line/kernel image.
 */
static void __init reserve_kernel(void)
{
	unsigned long start_pfn = PFN_UP(__pa(_end));

	memblock_reserve(0, HEAD_END);
	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
			 - (unsigned long)_stext);
	memblock_reserve(__sdma, __edma - __sdma);
}

static void __init setup_memory(void)
{
	struct memblock_region *reg;

	/*
	 * Init storage key for present memory
	 */
	for_each_memblock(memory, reg) {
		storage_key_init_range(reg->base, reg->base + reg->size);
	}
	psw_set_key(PAGE_DEFAULT_KEY);

	/* Only cosmetics */
	memblock_enforce_memory_limit(memblock_end_of_DRAM());
}
/*
 * Setup hardware capabilities.
 */
static int __init setup_hwcaps(void)
{
	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
	struct cpuid cpu_id;
	int i;

	/*
	 * The store facility list bits numbers as found in the principles
	 * of operation are numbered with bit 1UL<<31 as number 0 to
	 * bit 1UL<<0 as number 31.
	 *   Bit 0: instructions named N3, "backported" to esa-mode
	 *   Bit 2: z/Architecture mode is active
	 *   Bit 7: the store-facility-list-extended facility is installed
	 *   Bit 17: the message-security assist is installed
	 *   Bit 19: the long-displacement facility is installed
	 *   Bit 21: the extended-immediate facility is installed
	 *   Bit 22: extended-translation facility 3 is installed
	 *   Bit 30: extended-translation facility 3 enhancement facility
	 * These get translated to:
	 *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
	 *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
	 *   HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
	 *   HWCAP_S390_ETF3EH bit 8 (22 && 30).
	 */
	for (i = 0; i < 6; i++)
		if (test_facility(stfl_bits[i]))
			elf_hwcap |= 1UL << i;

	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_S390_ETF3EH;

	/*
	 * Check for additional facilities with store-facility-list-extended.
	 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
	 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
	 * as stored by stfl, bits 32-xxx contain additional facilities.
	 * How many facility words are stored depends on the number of
	 * doublewords passed to the instruction. The additional facilities
	 * are:
	 *   Bit 42: decimal floating point facility is installed
	 *   Bit 44: perform floating point operation facility is installed
	 * translated to:
	 *   HWCAP_S390_DFP bit 6 (42 && 44).
	 */
	if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_S390_DFP;

	/*
	 * Huge page support HWCAP_S390_HPAGE is bit 7.
	 */
	if (MACHINE_HAS_EDAT1)
		elf_hwcap |= HWCAP_S390_HPAGE;

	/*
	 * 64-bit register support for 31-bit processes
	 * HWCAP_S390_HIGH_GPRS is bit 9.
	 */
	elf_hwcap |= HWCAP_S390_HIGH_GPRS;

	/*
	 * Transactional execution support HWCAP_S390_TE is bit 10.
	 */
	if (MACHINE_HAS_TE)
		elf_hwcap |= HWCAP_S390_TE;

	/*
	 * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
	 * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
	 * instead of facility bit 129.
	 */
	if (MACHINE_HAS_VX) {
		elf_hwcap |= HWCAP_S390_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_S390_VXRS_EXT;
		if (test_facility(135))
			elf_hwcap |= HWCAP_S390_VXRS_BCD;
		if (test_facility(148))
			elf_hwcap |= HWCAP_S390_VXRS_EXT2;
		if (test_facility(152))
			elf_hwcap |= HWCAP_S390_VXRS_PDE;
	}
	if (test_facility(150))
		elf_hwcap |= HWCAP_S390_SORT;
	if (test_facility(151))
		elf_hwcap |= HWCAP_S390_DFLT;

	/*
	 * Guarded storage support HWCAP_S390_GS is bit 12.
	 */
	if (MACHINE_HAS_GS)
		elf_hwcap |= HWCAP_S390_GS;

	get_cpu_id(&cpu_id);
	add_device_randomness(&cpu_id, sizeof(cpu_id));
	switch (cpu_id.machine) {
	case 0x2064:
	case 0x2066:
	default:	/* Use "z900" as default for 64 bit kernels. */
		strcpy(elf_platform, "z900");
		break;
	case 0x2084:
	case 0x2086:
		strcpy(elf_platform, "z990");
		break;
	case 0x2094:
	case 0x2096:
		strcpy(elf_platform, "z9-109");
		break;
	case 0x2097:
	case 0x2098:
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	case 0x2827:
	case 0x2828:
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
	case 0x2965:
		strcpy(elf_platform, "z13");
		break;
	case 0x3906:
	case 0x3907:
		strcpy(elf_platform, "z14");
		break;
	case 0x8561:
	case 0x8562:
		strcpy(elf_platform, "z15");
		break;
	}

	/*
	 * Virtualization support HWCAP_INT_SIE is bit 0.
	 */
	if (sclp.has_sief2)
		int_hwcap |= HWCAP_INT_SIE;

	return 0;
}
arch_initcall(setup_hwcaps);
/*
 * Add system information as device randomness
 */
static void __init setup_randomness(void)
{
	struct sysinfo_3_2_2 *vmms;

	vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
							    PAGE_SIZE);
	if (!vmms)
		panic("Failed to allocate memory for sysinfo structure\n");

	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
	memblock_free((unsigned long) vmms, PAGE_SIZE);
}

/*
 * Find the correct size for the task_struct. This depends on
 * the size of the struct fpu at the end of the thread_struct
 * which is embedded in the task_struct.
 */
static void __init setup_task_size(void)
{
	int task_size = sizeof(struct task_struct);

	if (!MACHINE_HAS_VX) {
		task_size -= sizeof(__vector128) * __NUM_VXRS;
		task_size += sizeof(freg_t) * __NUM_FPRS;
	}
	arch_task_struct_size = task_size;
}

/*
 * Issue diagnose 318 to set the control program name and
 * version codes.
 */
static void __init setup_control_program_code(void)
{
	union diag318_info diag318_info = {
		.cpnc = CPNC_LINUX,
		.cpvc_linux = 0,
		.cpvc_distro = {0},
	};

	if (!sclp.has_diag318)
		return;

	diag_stat_inc(DIAG_STAT_X318);
	asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
}

/*
 * Print the component list from the IPL report
 */
static void __init log_component_list(void)
{
	struct ipl_rb_component_entry *ptr, *end;
	char *str;

	if (!early_ipl_comp_list_addr)
		return;
	if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
		pr_info("Linux is running with Secure-IPL enabled\n");
	else
		pr_info("Linux is running with Secure-IPL disabled\n");
	ptr = (void *) early_ipl_comp_list_addr;
	end = (void *) ptr + early_ipl_comp_list_size;
	pr_info("The IPL report contains the following components:\n");
	while (ptr < end) {
		if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
			if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
				str = "signed, verified";
			else
				str = "signed, verification failed";
		} else {
			str = "not signed";
		}
		pr_info("%016llx - %016llx (%s)\n",
			ptr->addr, ptr->addr + ptr->len, str);
		ptr++;
	}
}
/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");
	else
		pr_info("Linux is running as a guest in 64-bit mode\n");

	log_component_list();

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has been already set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
		nospec_auto_detect();

	parse_early_param();
#ifdef CONFIG_CRASH_DUMP
	/* Deactivate elfcorehdr= kernel parameter */
	elfcorehdr_addr = ELFCORE_ADDR_MAX;
#endif

	os_info_init();
	setup_ipl();
	setup_task_size();
	setup_control_program_code();

	/* Do some memory reservations *before* memory is added to memblock */
	reserve_memory_end();
	reserve_oldmem();
	reserve_kernel();
	reserve_initrd();
	reserve_certificate_list();
	reserve_mem_detect_info();
	memblock_allow_resize();

	/* Get information about *all* installed memory */
	memblock_add_mem_detect_info();

	free_mem_detect_info();
	remove_oldmem();

	/*
	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
	 * extra checks that HOLES_IN_ZONE would require.
	 *
	 * Is this still required?
	 */
	memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));

	setup_memory_end();
	setup_memory();
	dma_contiguous_reserve(memory_end);
	vmcp_cma_reserve();

	check_initrd();
	reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Be aware that smp_save_dump_cpus() triggers a system reset.
	 * Therefore CPU and device initialization should be done afterwards.
	 */
	smp_save_dump_cpus();
#endif
	setup_resources();
	setup_lowcore_dat_off();
	smp_fill_possible_mask();
	cpu_detect_mhz_feature();
	cpu_init();
	numa_setup();
	smp_detect_cpus();
	topology_init_early();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/*
	 * After paging_init created the kernel page table, the new PSWs
	 * in lowcore can now run with DAT enabled.
	 */
	setup_lowcore_dat_on();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	apply_alternative_instructions();
	if (IS_ENABLED(CONFIG_EXPOLINE))
		nospec_init_branches();

	/* Setup zfcpdump support */
	setup_zfcpdump();

	/* Add system specific data to the random pool */
	setup_randomness();
}