arch/s390/kernel/early.c

// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Hongjie Yang <hongjie@us.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/lockdep.h>
#include <linux/extable.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include "entry.h"

/*
 * Create a Kernel NSS if the SAVESYS= parameter is defined
 */
#define DEFSYS_CMD_SIZE		128
#define SAVESYS_CMD_SIZE	32

char kernel_nss_name[NSS_NAME_SIZE + 1];

static void __init setup_boot_command_line(void);

/*
 * Get the TOD clock running.
 */
static void __init reset_tod_clock(void)
{
	u64 time;

	if (store_tod_clock(&time) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
		disabled_wait(0);

	memset(tod_clock_base, 0, 16);
	*(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH;
	S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}

#ifdef CONFIG_SHARED_KERNEL
int __init savesys_ipl_nss(char *cmd, const int cmdlen);
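
/*
 * savesys_ipl_nss() hands an already EBCDIC-converted CP command to z/VM:
 * it saves the caller's registers, copies the command length from %r3 to
 * %r14, switches to 31-bit addressing mode, issues diagnose 0x8 (execute
 * a CP command, buffer address in %r2), switches back to 64-bit mode and
 * returns the value that diagnose 8 left in %r14 as the result in %r2.
 */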
asm(
	"	.section .init.text,\"ax\",@progbits\n"
	"	.align	4\n"
	"	.type	savesys_ipl_nss, @function\n"
	"savesys_ipl_nss:\n"
	"	stmg	6,15,48(15)\n"
	"	lgr	14,3\n"
	"	sam31\n"
	"	diag	2,14,0x8\n"
	"	sam64\n"
	"	lgr	2,14\n"
	"	lmg	6,15,48(15)\n"
	"	br	14\n"
	"	.size savesys_ipl_nss, .-savesys_ipl_nss\n"
	"	.previous\n");

static __initdata char upper_command_line[COMMAND_LINE_SIZE];
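
/*
 * Sketch of the CP command flow built below (assuming standard z/VM
 * DEFSYS/SAVESYS semantics): DEFSYS defines the named saved system with
 * the kernel text range as shared read-only (SR) and the ranges before
 * and after it as exclusive-write (EW), SAVESYS saves the running system
 * into it via diagnose 8, and the chained IPL command re-IPLs from the
 * freshly saved NSS.  MINSIZE is specified in KB, hence the "pfn << 2"
 * (4 KB pages to KB) conversions for min_size.
 */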
static noinline __init void create_kernel_nss(void)
{
	unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned int sinitrd_pfn, einitrd_pfn;
#endif
	int response;
	int hlen;
	size_t len;
	char *savesys_ptr;
	char defsys_cmd[DEFSYS_CMD_SIZE];
	char savesys_cmd[SAVESYS_CMD_SIZE];

	/* Do nothing if we are not running under VM */
	if (!MACHINE_IS_VM)
		return;

	/* Convert COMMAND_LINE to upper case */
	for (i = 0; i < strlen(boot_command_line); i++)
		upper_command_line[i] = toupper(boot_command_line[i]);

	savesys_ptr = strstr(upper_command_line, "SAVESYS=");

	if (!savesys_ptr)
		return;

	savesys_ptr += 8;	/* Point to the beginning of the NSS name */
	for (i = 0; i < NSS_NAME_SIZE; i++) {
		if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
			break;
		kernel_nss_name[i] = savesys_ptr[i];
	}

	stext_pfn = PFN_DOWN(__pa(&_stext));
	eshared_pfn = PFN_DOWN(__pa(&_eshared));
	end_pfn = PFN_UP(__pa(&_end));
	min_size = end_pfn << 2;

	hlen = snprintf(defsys_cmd, DEFSYS_CMD_SIZE,
			"DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
			kernel_nss_name, stext_pfn - 1, stext_pfn,
			eshared_pfn - 1, eshared_pfn, end_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
		einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
		min_size = einitrd_pfn << 2;
		hlen += snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
				 " EW %.5X-%.5X", sinitrd_pfn, einitrd_pfn);
	}
#endif

	snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
		 " EW MINSIZE=%.7iK PARMREGS=0-13", min_size);
	defsys_cmd[DEFSYS_CMD_SIZE - 1] = '\0';
	snprintf(savesys_cmd, SAVESYS_CMD_SIZE, "SAVESYS %s \n IPL %s",
		 kernel_nss_name, kernel_nss_name);
	savesys_cmd[SAVESYS_CMD_SIZE - 1] = '\0';

	__cpcmd(defsys_cmd, NULL, 0, &response);

	if (response != 0) {
		pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
			response);
		kernel_nss_name[0] = '\0';
		return;
	}

	len = strlen(savesys_cmd);
	ASCEBC(savesys_cmd, len);
	response = savesys_ipl_nss(savesys_cmd, len);

	/* On success: response is equal to the command size,
	 *	       max SAVESYS_CMD_SIZE
	 * On error: response contains the numeric portion of cp error message.
	 *	     for SAVESYS it will be >= 263
	 *	     for missing privilege class, it will be 1
	 */
	if (response > SAVESYS_CMD_SIZE || response == 1) {
		pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
			response);
		kernel_nss_name[0] = '\0';
		return;
	}

	/* re-initialize cputime accounting. */
	get_tod_clock_ext(tod_clock_base);
	S390_lowcore.last_update_clock = *(__u64 *) &tod_clock_base[1];
	S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
	S390_lowcore.user_timer = 0;
	S390_lowcore.system_timer = 0;
	asm volatile("SPT 0(%0)" : : "a" (&S390_lowcore.last_update_timer));

	/* re-setup boot command line with new ipl vm parms */
	ipl_update_parameters();
	setup_boot_command_line();

	ipl_flags = IPL_NSS_VALID;
}

#else /* CONFIG_SHARED_KERNEL */

static inline void create_kernel_nss(void) { }

#endif /* CONFIG_SHARED_KERNEL */

/*
 * Clear bss memory
 */
static noinline __init void clear_bss_section(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}

/*
 * Initialize storage key for kernel pages
 */
static noinline __init void init_kernel_storage_key(void)
{
#if PAGE_DEFAULT_KEY
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(&_end));

	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT,
				     PAGE_DEFAULT_KEY, 0);
#endif
}

static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
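
/*
 * The control program identifiers compared below are EBCDIC strings:
 * "\xd2\xe5\xd4" is "KVM" and "\xa9\x61\xe5\xd4" is "z/VM".
 */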
static noinline __init void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;

	/* Detect known hypervisors */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}

/* Remove leading, trailing and double whitespace. */
static inline void strim_all(char *str)
{
	char *s;

	s = strim(str);
	if (s != str)
		memmove(str, s, strlen(s));
	while (*str) {
		if (!isspace(*str++))
			continue;
		if (isspace(*str)) {
			s = skip_spaces(str);
			memmove(str, s, strlen(s) + 1);
		}
	}
}

static noinline __init void setup_arch_string(void)
{
	struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;
	struct sysinfo_3_2_2 *vm = (struct sysinfo_3_2_2 *)&sysinfo_page;
	char mstr[80], hvstr[17];

	if (stsi(mach, 1, 1, 1))
		return;
	EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
	EBCASC(mach->type, sizeof(mach->type));
	EBCASC(mach->model, sizeof(mach->model));
	EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
	sprintf(mstr, "%-16.16s %-4.4s %-16.16s %-16.16s",
		mach->manufacturer, mach->type,
		mach->model, mach->model_capacity);
	strim_all(mstr);
	if (stsi(vm, 3, 2, 2) == 0 && vm->count) {
		EBCASC(vm->vm[0].cpi, sizeof(vm->vm[0].cpi));
		sprintf(hvstr, "%-16.16s", vm->vm[0].cpi);
		strim_all(hvstr);
	} else {
		sprintf(hvstr, "%s",
			MACHINE_IS_LPAR ? "LPAR" :
			MACHINE_IS_VM ? "z/VM" :
			MACHINE_IS_KVM ? "KVM" : "unknown");
	}
	dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
}

static __init void setup_topology(void)
{
	int max_mnest;

	if (!test_facility(11))
		return;
	S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
			break;
	}
	topology_max_mnest = max_mnest;
}
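
/*
 * Note: the fixup address is stored into the program old PSW in the
 * lowcore, which lies in the low-address-protected range, so low-address
 * protection (a control register 0 bit) is switched off around the store
 * and the original control register contents are restored afterwards.
 */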
static void early_pgm_check_handler(void)
{
	const struct exception_table_entry *fixup;
	unsigned long cr0, cr0_new;
	unsigned long addr;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr);
	if (!fixup)
		disabled_wait(0);
	/* Disable low address protection before storing into lowcore. */
	__ctl_store(cr0, 0, 0);
	cr0_new = cr0 & ~(1UL << 28);
	__ctl_load(cr0_new, 0, 0);
	S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
	__ctl_load(cr0, 0, 0);
}
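
/*
 * Install the early external and program check handlers from base.S;
 * program checks are routed to early_pgm_check_handler(), which lets the
 * EX_TABLE based probing further down in this file (e.g. the diagnose
 * detection) recover gracefully from unsupported operations.
 */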
static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
	if (IS_ENABLED(CONFIG_KASAN))
		psw.mask |= PSW_MASK_DAT;
	psw.addr = (unsigned long) s390_base_ext_handler;
	S390_lowcore.external_new_psw = psw;
	psw.addr = (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = early_pgm_check_handler;
	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}

static noinline __init void setup_facility_list(void)
{
	stfle(S390_lowcore.stfle_fac_list,
	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
	memcpy(S390_lowcore.alt_stfle_fac_list,
	       S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.alt_stfle_fac_list));
	if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
}
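
/*
 * Probe optional diagnoses: 0x9c (directed yield to a specific CPU) and
 * 0x44 (voluntary time slice yield).  If the hypervisor does not provide
 * the diagnose, the program check is caught via the EX_TABLE entry and rc
 * keeps its preset -EOPNOTSUPP value, so the machine flag stays clear.
 */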
static __init void detect_diag9c(void)
{
	unsigned int cpu_address;
	int rc;

	cpu_address = stap();
	diag_stat_inc(DIAG_STAT_X09C);
	asm volatile(
		"	diag	%2,0,0x9c\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}

static __init void detect_diag44(void)
{
	int rc;

	diag_stat_inc(DIAG_STAT_X044);
	asm volatile(
		"	diag	0,0,0x44\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
}

static __init void detect_machine_facilities(void)
{
	if (test_facility(8)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
		__ctl_set_bit(0, 23);
	}
	if (test_facility(78))
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
	if (test_facility(3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
	if (test_facility(40))
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
	if (test_facility(50) && test_facility(73)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
		__ctl_set_bit(0, 55);
	}
	if (test_facility(51))
		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
	if (test_facility(129)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
		__ctl_set_bit(0, 17);
	}
	if (test_facility(130)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
		__ctl_set_bit(0, 20);
	}
	if (test_facility(133))
		S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
	if (test_facility(139) && (tod_clock_base[1] & 0x80)) {
		/* Enable signed clock comparator comparisons */
		S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
		clock_comparator_max = -1ULL >> 1;
		__ctl_set_bit(0, 53);
	}
}

static inline void save_vector_registers(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (test_facility(129))
		save_vx_regs(boot_cpu_vector_save_area);
#endif
}

static int __init disable_vector_extension(char *str)
{
	S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
	__ctl_clear_bit(0, 17);
	return 0;
}
early_param("novx", disable_vector_extension);

static int __init noexec_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (!rc && !enabled) {
		/* Disable no-execute support */
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_NX;
		__ctl_clear_bit(0, 20);
	}
	return rc;
}
early_param("noexec", noexec_setup);

static int __init cad_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (!rc && enabled && test_facility(128))
		/* Enable problem state CAD. */
		__ctl_set_bit(2, 3);
	return rc;
}
early_param("cad", cad_setup);
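
/*
 * Early byte-wise memmove: the copy direction is chosen so that
 * overlapping source and destination areas are handled correctly, and a
 * temporary program check new PSW pointing behind the copy loop is
 * installed so that a program check during the copy simply terminates it;
 * the previous program check PSW is restored afterwards.
 */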
static __init void memmove_early(void *dst, const void *src, size_t n)
{
	unsigned long addr;
	long incr;
	psw_t old;

	if (!n)
		return;
	incr = 1;
	if (dst > src) {
		incr = -incr;
		dst += n - 1;
		src += n - 1;
	}
	old = S390_lowcore.program_new_psw;
	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		"	larl	%[addr],1f\n"
		"	stg	%[addr],%[psw_pgm_addr]\n"
		"0:	mvc	0(1,%[dst]),0(%[src])\n"
		"	agr	%[dst],%[incr]\n"
		"	agr	%[src],%[incr]\n"
		"	brctg	%[n],0b\n"
		"1:\n"
		: [addr] "=&d" (addr),
		  [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr),
		  [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
		: [incr] "d" (incr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
}

static __init noinline void ipl_save_parameters(void)
{
	void *src, *dst;

	src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
	dst = (void *) IPL_PARMBLOCK_ORIGIN;
	memmove_early(dst, src, PAGE_SIZE);
	S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}

static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
	/*
	 * Just like in case of IPL from VM reader we make sure there is a
	 * gap of 4MB between end of kernel and start of initrd.
	 * That way we can also be sure that saving an NSS will succeed,
	 * which however only requires different segments.
	 */
	if (!INITRD_START || !INITRD_SIZE)
		return;
	if (INITRD_START >= min_initrd_addr)
		return;
	memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
	INITRD_START = min_initrd_addr;
#endif
}

/* Set up boot command line */
static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
	char *parm, *delim;
	size_t rc, len;

	len = strlen(boot_command_line);

	delim = boot_command_line + len;	/* '\0' character position */
	parm  = boot_command_line + len + 1;	/* append right after '\0' */

	rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
	if (rc) {
		if (*parm == '=')
			memmove(boot_command_line, parm + 1, rc);
		else
			*delim = ' ';		/* replace '\0' with space */
	}
}
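
/*
 * ASCII characters never have bit 7 set, so any byte with the top bit set
 * means the command line is still in EBCDIC and has to be converted.
 */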
static inline int has_ebcdic_char(const char *str)
{
	int i;

	for (i = 0; str[i]; i++)
		if (str[i] & 0x80)
			return 1;
	return 0;
}

static void __init setup_boot_command_line(void)
{
	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
	/* convert arch command line to ascii if necessary */
	if (has_ebcdic_char(COMMAND_LINE))
		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
	/* copy arch command line */
	strlcpy(boot_command_line, strstrip(COMMAND_LINE),
		ARCH_COMMAND_LINE_SIZE);

	/* append IPL PARM data to the boot command line */
	if (MACHINE_IS_VM)
		append_to_cmdline(append_ipl_vmparm);

	append_to_cmdline(append_ipl_scpdata);
}

/*
 * Save ipl parameters, clear bss memory, initialize storage keys
 * and create a kernel NSS at startup if the SAVESYS= parm is defined
 */
void __init startup_init(void)
{
	reset_tod_clock();
	ipl_save_parameters();
	rescue_initrd();
	clear_bss_section();
	ipl_verify_parameters();
	time_early_init();
	init_kernel_storage_key();
	lockdep_off();
	setup_lowcore_early();
	setup_facility_list();
	detect_machine_type();
	setup_arch_string();
	ipl_update_parameters();
	setup_boot_command_line();
	create_kernel_nss();
	detect_diag9c();
	detect_diag44();
	detect_machine_facilities();
	save_vector_registers();
	setup_topology();
	sclp_early_detect();
	lockdep_on();
}