// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kmsg_dump.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <as-layout.h>
#include <arch.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
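
/*
 * Default root device when none is given on the command line: "98:0" is
 * major 98 (the UML ubd block driver), minor 0, i.e. /dev/ubd0.
 */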
#define DEFAULT_COMMAND_LINE "root=98:0"

/* Changed in add_arg and setup_arch, which run before SMP is started */
static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };

static void __init add_arg(char *arg)
{
	if (strlen(command_line) + strlen(arg) + 1 > COMMAND_LINE_SIZE) {
		os_warn("add_arg: Too many command line arguments!\n");
		exit(1);
	}
	if (strlen(command_line) > 0)
		strcat(command_line, " ");
	strcat(command_line, arg);
}

/*
 * These fields are initialized at boot time and not changed.
 * XXX This structure is used only in the non-SMP case.  Maybe this
 * should be moved to smp.c.
 */
struct cpuinfo_um boot_cpu_data = {
	.ipi_pipe		= { -1, -1 }
};
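
/*
 * Initial IRQ stack for CPU 0, placed in its own section so the linker
 * script can align it like the initial task stack.
 */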
union thread_union cpu0_irqstack
	__section(".data..init_irqstack") =
		{ .thread_info = INIT_THREAD_INFO(init_task) };

/* Changed in setup_arch, which is called in early boot */
static char host_info[(__NEW_UTS_LEN + 1) * 5];
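/* Filled by setup_hostinfo() in setup_arch() below */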

static int show_cpuinfo(struct seq_file *m, void *v)
{
	int index = 0;

	seq_printf(m, "processor\t: %d\n", index);
	seq_printf(m, "vendor_id\t: User Mode Linux\n");
	seq_printf(m, "model name\t: UML\n");
	seq_printf(m, "mode\t\t: skas\n");
	seq_printf(m, "host\t\t: %s\n", host_info);
	seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
		   loops_per_jiffy/(500000/HZ),
		   (loops_per_jiffy/(5000/HZ)) % 100);

	return 0;
}
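
/* seq_file iterator callbacks used by the cpuinfo_op table below */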
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};

/* Set in linux_main */
unsigned long uml_physmem;
EXPORT_SYMBOL(uml_physmem);

unsigned long uml_reserved; /* Also modified in mem_init */
unsigned long start_vm;
unsigned long end_vm;

/* Set in uml_ncpus_setup */
int ncpus = 1;

/* Set in early boot */
static int have_root __initdata = 0;

/* Set in uml_mem_setup and modified in linux_main */
long long physmem_size = 32 * 1024 * 1024;
EXPORT_SYMBOL(physmem_size);

static const char *usage_string =
"User Mode Linux v%s\n"
"	available at http://user-mode-linux.sourceforge.net/\n\n";

static int __init uml_version_setup(char *line, int *add)
{
	/* Explicitly use printf() to show version in stdout */
	printf("%s\n", init_utsname()->release);
	exit(0);

	return 0;
}

__uml_setup("--version", uml_version_setup,
"--version\n"
"    Prints the version number of the kernel.\n\n"
);

static int __init uml_root_setup(char *line, int *add)
{
	have_root = 1;
	return 0;
}

__uml_setup("root=", uml_root_setup,
"root=<file containing the root fs>\n"
"    This is actually used by the generic kernel in exactly the same\n"
"    way as in any other kernel. If you configure a number of block\n"
"    devices and want to boot off something other than ubd0, you\n"
"    would use something like:\n"
"        root=/dev/ubd5\n\n"
);

static int __init no_skas_debug_setup(char *line, int *add)
{
	os_warn("'debug' is not necessary to gdb UML in skas mode - run\n");
	os_warn("'gdb linux'\n");

	return 0;
}

__uml_setup("debug", no_skas_debug_setup,
"debug\n"
"    this flag is not needed to run gdb on UML in skas mode\n\n"
);

static int __init Usage(char *line, int *add)
{
	const char **p;

	printf(usage_string, init_utsname()->release);
	p = &__uml_help_start;
	/* Explicitly use printf() to show help in stdout */
	while (p < &__uml_help_end) {
		printf("%s", *p);
		p++;
	}
	exit(0);
	return 0;
}

__uml_setup("--help", Usage,
"--help\n"
"    Prints this message.\n\n"
);
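
/*
 * Match one command-line argument against the registered __uml_setup
 * entries and run the handler of the first entry whose prefix matches.
 */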
static void __init uml_checksetup(char *line, int *add)
{
	struct uml_param *p;

	p = &__uml_setup_start;
	while (p < &__uml_setup_end) {
		size_t n;

		n = strlen(p->str);
		if (!strncmp(line, p->str, n) && p->setup_func(line + n, add))
			return;
		p++;
	}
}

static void __init uml_postsetup(void)
{
	initcall_t *p;

	p = &__uml_postsetup_start;
	while (p < &__uml_postsetup_end) {
		(*p)();
		p++;
	}
}

static int panic_exit(struct notifier_block *self, unsigned long unused1,
		      void *unused2)
{
	kmsg_dump(KMSG_DUMP_PANIC);
	bust_spinlocks(1);
	bust_spinlocks(0);
	uml_exitcode = 1;
	os_dump_core();
	return 0;
}

static struct notifier_block panic_exit_notifier = {
	.notifier_call	= panic_exit,
};

void uml_finishsetup(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &panic_exit_notifier);

	uml_postsetup();

	new_thread_handler();
}

/* Set during early boot */
unsigned long task_size;
EXPORT_SYMBOL(task_size);

unsigned long host_task_size;

unsigned long brk_start;
unsigned long end_iomem;
EXPORT_SYMBOL(end_iomem);

#define MIN_VMALLOC (32 * 1024 * 1024)
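
/*
 * linux_main() carves the host address space into physical memory, iomem
 * and a vmalloc region; MIN_VMALLOC is the minimum space kept back for
 * the latter.
 */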

int __init linux_main(int argc, char **argv)
{
	unsigned long avail, diff;
	unsigned long virtmem_size, max_physmem;
	unsigned long stack;
	unsigned int i;
	int add;

	for (i = 1; i < argc; i++) {
		if ((i == 1) && (argv[i][0] == ' '))
			continue;
		add = 1;
		uml_checksetup(argv[i], &add);
		if (add)
			add_arg(argv[i]);
	}
	if (have_root == 0)
		add_arg(DEFAULT_COMMAND_LINE);

	host_task_size = os_get_top_address();
	/*
	 * TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps
	 * out
	 */
	task_size = host_task_size & PGDIR_MASK;

	/* OS sanity checks that need to happen before the kernel runs */
	os_early_checks();

	brk_start = (unsigned long) sbrk(0);

	/*
	 * Increase physical memory size for exec-shield users
	 * so they actually get what they asked for. This should
	 * add zero for non-exec shield users
	 */
	diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	if (diff > 1024 * 1024) {
		os_info("Adding %ld bytes to physical memory to account for "
			"exec-shield gap\n", diff);
		physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	}

	uml_physmem = (unsigned long) __binary_start & PAGE_MASK;

	/* Reserve up to 4M after the current brk */
	uml_reserved = ROUND_4M(brk_start) + (1 << 22);

	setup_machinename(init_utsname()->machine);

	highmem = 0;
	iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
	max_physmem = TASK_SIZE - uml_physmem - iomem_size - MIN_VMALLOC;

	/*
	 * Zones have to begin on a 1 << MAX_ORDER page boundary,
	 * so this makes sure that's true for highmem
	 */
	max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
	if (physmem_size + iomem_size > max_physmem) {
		highmem = physmem_size + iomem_size - max_physmem;
		physmem_size -= highmem;
	}

	high_physmem = uml_physmem + physmem_size;
	end_iomem = high_physmem + iomem_size;
	high_memory = (void *) end_iomem;

	start_vm = VMALLOC_START;
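
	/*
	 * The process stack (approximated by the address of argv, rounded
	 * down to a 1MB boundary) bounds the usable address space, so clip
	 * the virtual memory region to what fits below it.
	 */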
	virtmem_size = physmem_size;
	stack = (unsigned long) argv;
	stack &= ~(1024 * 1024 - 1);
	avail = stack - start_vm;
	if (physmem_size > avail)
		virtmem_size = avail;
	end_vm = start_vm + virtmem_size;

	if (virtmem_size < physmem_size)
		os_info("Kernel virtual memory size shrunk to %lu bytes\n",
			virtmem_size);

	os_flush_stdout();

	return start_uml();
}

int __init __weak read_initrd(void)
{
	return 0;
}

void __init setup_arch(char **cmdline_p)
{
	stack_protections((unsigned long) &init_thread_info);
	setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
	mem_total_pages(physmem_size, iomem_size, highmem);
	read_initrd();

	paging_init();
	strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;
	setup_hostinfo(host_info, sizeof host_info);
}

void __init check_bugs(void)
{
	arch_check_bugs();
	os_check_bugs();
}

void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
}

void *text_poke(void *addr, const void *opcode, size_t len)
{
	/*
	 * In UML, the only reference to this function is in
	 * apply_relocate_add(), which shouldn't ever actually call this
	 * because UML doesn't have live patching.
	 */
	return memcpy(addr, opcode, len);
}

void text_poke_sync(void)
{
}

void uml_pm_wake(void)
{
	pm_system_wakeup();
}

#ifdef CONFIG_PM_SLEEP
static int um_suspend_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}

static int um_suspend_prepare(void)
{
	um_irqs_suspend();
	return 0;
}

static int um_suspend_enter(suspend_state_t state)
{
	if (WARN_ON(state != PM_SUSPEND_MEM))
		return -EINVAL;

	/*
	 * This is identical to the idle sleep, but we've just
	 * (during suspend) turned off all interrupt sources
	 * except for the ones we want, so now we can only wake
	 * up on something we actually want to wake up on. All
	 * timing has also been suspended.
	 */
	um_idle_sleep();

	return 0;
}

static void um_suspend_finish(void)
{
	um_irqs_resume();
}

const struct platform_suspend_ops um_suspend_ops = {
	.valid = um_suspend_valid,
	.prepare = um_suspend_prepare,
	.enter = um_suspend_enter,
	.finish = um_suspend_finish,
};
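
/* Hand um_suspend_ops to the PM core and set up the suspend wake-up path */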
static int init_pm_wake_signal(void)
{
	/*
	 * In external time-travel mode we can't use signals to wake up
	 * since that would mess with the scheduling. We'll have to do
	 * some additional work to support wakeup on virtio devices or
	 * similar, perhaps implementing a fake RTC controller that can
	 * trigger wakeup (and request the appropriate scheduling from
	 * the external scheduler when going to suspend.)
	 */
	if (time_travel_mode != TT_MODE_EXTERNAL)
		register_pm_wake_signal();

	suspend_set_ops(&um_suspend_ops);

	return 0;
}

late_initcall(init_pm_wake_signal);

#endif /* CONFIG_PM_SLEEP */