// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include "compressed/decompressor.h"
#include "boot.h"

extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
unsigned long __bootdata_preserved(__kaslr_offset);
/*
 * Some code and data needs to stay below 2 GB, even when the kernel would be
 * relocated above 2 GB, because it has to use 31 bit addresses.
 * Such code and data is part of the .dma section, and its location is passed
 * over to the decompressed / relocated kernel via the .boot.preserved.data
 * section.
 */
extern char _sdma[], _edma[];
extern char _stext_dma[], _etext_dma[];
extern struct exception_table_entry _start_dma_ex_table[];
extern struct exception_table_entry _stop_dma_ex_table[];
unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
unsigned long __bootdata_preserved(__edma) = __pa(&_edma);
unsigned long __bootdata_preserved(__stext_dma) = __pa(&_stext_dma);
unsigned long __bootdata_preserved(__etext_dma) = __pa(&_etext_dma);
struct exception_table_entry *
        __bootdata_preserved(__start_dma_ex_table) = _start_dma_ex_table;
struct exception_table_entry *
        __bootdata_preserved(__stop_dma_ex_table) = _stop_dma_ex_table;

int _diag210_dma(struct diag210 *addr);
int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
void _diag0c_dma(struct hypfs_diag0c_entry *entry);
void _diag308_reset_dma(void);
struct diag_ops __bootdata_preserved(diag_dma_ops) = {
        .diag210 = _diag210_dma,
        .diag26c = _diag26c_dma,
        .diag14 = _diag14_dma,
        .diag0c = _diag0c_dma,
        .diag308_reset = _diag308_reset_dma
};
static struct diag210 _diag210_tmp_dma __section(.dma.data);
struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
void _swsusp_reset_dma(void);
unsigned long __bootdata_preserved(__swsusp_reset_dma) = __pa(_swsusp_reset_dma);
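
/* Print an error message via the early SCLP console and stop. */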
void error(char *x)
{
        sclp_early_printk("\n\n");
        sclp_early_printk(x);
        sclp_early_printk("\n\n -- System halted");

        disabled_wait();
}
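
/*
 * For an uncompressed kernel the first address that is safe to use for
 * scratch data lies right behind the kernel image and its .bss.
 */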
#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
        return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif
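
/*
 * Move the initrd up to 'addr' if it currently starts below it, so that it
 * is not overwritten while the kernel image is being placed.
 */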
static void rescue_initrd(unsigned long addr)
{
        if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
                return;
        if (!INITRD_START || !INITRD_SIZE)
                return;
        if (addr <= INITRD_START)
                return;
        memmove((void *)addr, (void *)INITRD_START, INITRD_SIZE);
        INITRD_START = addr;
}
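
/*
 * Copy the .boot.data and .boot.preserved.data sections gathered during
 * early boot into their locations inside the (possibly relocated)
 * decompressed kernel image.
 */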
static void copy_bootdata(void)
{
        if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
                error(".boot.data section size mismatch");
        memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
        if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
                error(".boot.preserved.data section size mismatch");
        memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start,
               vmlinux.bootdata_preserved_size);
}
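
/*
 * Walk the .rela.dyn relocation records of the decompressed kernel and
 * apply them, shifted by the KASLR offset, via arch_kexec_do_relocs().
 */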
static void handle_relocs(unsigned long offset)
{
        Elf64_Rela *rela_start, *rela_end, *rela;
        int r_type, r_sym, rc;
        Elf64_Addr loc, val;
        Elf64_Sym *dynsym;

        rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
        rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
        dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
        for (rela = rela_start; rela < rela_end; rela++) {
                loc = rela->r_offset + offset;
                val = rela->r_addend;
                r_sym = ELF64_R_SYM(rela->r_info);
                if (r_sym) {
                        if (dynsym[r_sym].st_shndx != SHN_UNDEF)
                                val += dynsym[r_sym].st_value + offset;
                } else {
                        /*
                         * 0 == undefined symbol table index (STN_UNDEF),
                         * used for R_390_RELATIVE, only add KASLR offset
                         */
                        val += offset;
                }
                r_type = ELF64_R_TYPE(rela->r_info);
                rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
                if (rc)
                        error("Unknown relocation type");
        }
}

static void clear_bss_section(void)
{
        memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}
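
/*
 * Main C entry point of the boot phase: determine a safe scratch address,
 * parse the boot information, optionally randomize the load address,
 * place the kernel image and hand over control to it.
 */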
void startup_kernel(void)
{
        unsigned long random_lma;
        unsigned long safe_addr;
        void *img;

        store_ipl_parmblock();
        safe_addr = mem_safe_offset();
        safe_addr = read_ipl_report(safe_addr);
        rescue_initrd(safe_addr);
        sclp_early_read_info();
        setup_boot_command_line();
        parse_boot_command_line();
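
        /*
         * With KASLR enabled, pick a random load address and shift every
         * address recorded in the vmlinux descriptor by the resulting offset.
         */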
        random_lma = __kaslr_offset = 0;
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
                random_lma = get_random_base(safe_addr);
                if (random_lma) {
                        __kaslr_offset = random_lma - vmlinux.default_lma;
                        img = (void *)vmlinux.default_lma;
                        vmlinux.default_lma += __kaslr_offset;
                        vmlinux.entry += __kaslr_offset;
                        vmlinux.bootdata_off += __kaslr_offset;
                        vmlinux.bootdata_preserved_off += __kaslr_offset;
                        vmlinux.rela_dyn_start += __kaslr_offset;
                        vmlinux.rela_dyn_end += __kaslr_offset;
                        vmlinux.dynsym_start += __kaslr_offset;
                }
        }
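
        /*
         * Place the kernel image at its final (possibly randomized) location:
         * decompress and move it there, or, for an uncompressed kernel with
         * KASLR, copy the already uncompressed image.
         */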
        if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
                img = decompress_kernel();
                memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
        } else if (__kaslr_offset)
                memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);

        clear_bss_section();
        copy_bootdata();
        if (IS_ENABLED(CONFIG_RELOCATABLE))
                handle_relocs(__kaslr_offset);
        if (__kaslr_offset) {
                /*
                 * Save KASLR offset for early dumps, before vmcore_info is set.
                 * Mark as uneven to distinguish from real vmcore_info pointer.
                 */
                S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
                /* Clear non-relocated kernel */
                if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
                        memset(img, 0, vmlinux.image_size);
        }
        vmlinux.entry();
}