1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/string.h>
4 #include <asm/boot_data.h>
5 #include <asm/sections.h>
6 #include <asm/cpu_mf.h>
12 #include "compressed/decompressor.h"
/* Linker-script bounds of the .boot.data section (copied to the kernel proper). */
extern char __boot_data_start[], __boot_data_end[];
/* Linker-script bounds of .boot.preserved.data (survives into the decompressed kernel). */
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
/* KASLR displacement of the decompressed kernel image; preserved for the kernel proper. */
unsigned long __bootdata_preserved(__kaslr_offset);
/* Upper limit of the identity mapping, i.e. of usable physical memory (see setup_ident_map_size). */
unsigned long __bootdata(ident_map_size);
/*
 * Some code and data needs to stay below 2 GB, even when the kernel would be
 * relocated above 2 GB, because it has to use 31 bit addresses.
 * Such code and data is part of the .dma section, and its location is passed
 * over to the decompressed / relocated kernel via the .boot.preserved.data
 * section.
 */
/* Link-time bounds of the whole .dma section (code + data kept below 2 GB). */
extern char _sdma[], _edma[];
/* Link-time bounds of the executable text inside the .dma section. */
extern char _stext_dma[], _etext_dma[];
/* Exception table covering .dma code. */
extern struct exception_table_entry _start_dma_ex_table[];
extern struct exception_table_entry _stop_dma_ex_table[];
/* Physical addresses of the above bounds, handed over to the kernel proper. */
unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
unsigned long __bootdata_preserved(__edma) = __pa(&_edma);
unsigned long __bootdata_preserved(__stext_dma) = __pa(&_stext_dma);
unsigned long __bootdata_preserved(__etext_dma) = __pa(&_etext_dma);
/* Exception-table pointers, likewise preserved across decompression. */
struct exception_table_entry *
	__bootdata_preserved(__start_dma_ex_table) = _start_dma_ex_table;
struct exception_table_entry *
	__bootdata_preserved(__stop_dma_ex_table) = _stop_dma_ex_table;
/* Entry points of the 31-bit addressable DIAGNOSE helpers located in .dma. */
int _diag210_dma(struct diag210 *addr);
int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
void _diag0c_dma(struct hypfs_diag0c_entry *entry);
void _diag308_reset_dma(void);

/*
 * Function-pointer table of the DIAG helpers above; preserved so the
 * decompressed kernel can call into the .dma section.
 */
struct diag_ops __bootdata_preserved(diag_dma_ops) = {
	.diag210 = _diag210_dma,
	.diag26c = _diag26c_dma,
	.diag14 = _diag14_dma,
	.diag0c = _diag0c_dma,
	.diag308_reset = _diag308_reset_dma
	/*
	 * NOTE(review): the closing "};" of this initializer appears to have
	 * been lost in extraction -- confirm against the full file.
	 */

/* Scratch diag210 buffer that must reside below 2 GB, hence .dma.data. */
static struct diag210 _diag210_tmp_dma __section(".dma.data");
/* Pointer to that buffer, preserved for the kernel proper. */
struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
/*
 * NOTE(review): fragment of the boot error path -- the enclosing function
 * definition (presumably the error() routine called below in copy_bootdata)
 * and the statements between these two calls were lost in extraction.
 * Messages are printed via the early SCLP console.
 */
sclp_early_printk("\n\n");
sclp_early_printk("\n\n -- System halted");
/*
 * Initialize the lowcore PID/LPP fields and, if facility bit 40 is installed
 * (presumably the load-program-parameter facility -- TODO confirm), load the
 * LPP register so hardware samples can be attributed to this boot stage.
 * NOTE(review): the function's surrounding braces were lost in extraction.
 */
static void setup_lpp(void)
	S390_lowcore.current_pid = 0;
	/* LPP_MAGIC marks the lowcore lpp field as a boot-time magic value. */
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
#ifdef CONFIG_KERNEL_UNCOMPRESSED
/*
 * Apparently the lowest address at which data (e.g. the initrd) can safely be
 * placed without being overwritten by the kernel image: end of the image plus
 * its BSS. Used below via rescue_initrd() -- TODO confirm against full file.
 * NOTE(review): the body braces and the matching #endif were lost in
 * extraction.
 */
unsigned long mem_safe_offset(void)
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
/*
 * Move the initrd up to "addr" if it currently lies below that address, so
 * later decompression/relocation cannot overwrite it. No-op when initrd
 * support is compiled out, no initrd is present, or it is already high enough.
 * NOTE(review): the guard-clause bodies (presumably "return;") and the
 * function braces were lost in extraction.
 */
static void rescue_initrd(unsigned long addr)
	/* Nothing to do without initrd support ... */
	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
	/* ... or without an initrd actually being present. */
	if (!INITRD_START || !INITRD_SIZE)
	/* Already at or above the safe address: leave it where it is. */
	if (addr <= INITRD_START)
	/* memmove: the source and destination ranges may overlap. */
	memmove((void *)addr, (void *)INITRD_START, INITRD_SIZE);
/*
 * Copy the decompressor's .boot.data and .boot.preserved.data sections into
 * the decompressed kernel at the offsets recorded in the vmlinux info struct,
 * after verifying that the section sizes agree; mismatch is fatal (error()).
 * NOTE(review): the function's surrounding braces were lost in extraction.
 */
static void copy_bootdata(void)
	/* The linker-measured size must match what the kernel image expects. */
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	/* Same check for the data that must survive into the kernel proper. */
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
/*
 * Apply the dynamic relocations recorded in .rela.dyn to the kernel image
 * after it has been displaced by "offset" (the KASLR displacement).
 * NOTE(review): the declarations of "loc", "val" and "dynsym", the closing
 * brace of the loop, and the "if (rc)" guard before error() were lost in
 * extraction -- what follows is the surviving fragment.
 */
static void handle_relocs(unsigned long offset)
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	/* Relocation table and dynamic symbol table addresses from vmlinux info. */
	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		/* Address to patch, shifted by the KASLR displacement. */
		loc = rela->r_offset + offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		/* Defined symbol: add its (equally displaced) value to the addend. */
		if (dynsym[r_sym].st_shndx != SHN_UNDEF)
			val += dynsym[r_sym].st_value + offset;
		/*
		 * 0 == undefined symbol table index (STN_UNDEF),
		 * used for R_390_RELATIVE, only add KASLR offset
		 */
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		/* NOTE(review): presumably guarded by "if (rc)" in the full file. */
		error("Unknown relocation type");
/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might not be all online memory, but also include standby (offline)
 * memory. "ident_map_size" could be lower than actual standby or even online
 * memory present, due to limiting factors. We should never go above this limit.
 * It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always <= end of the last online memory block (get_mem_detect_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    a crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    a stand-alone dump.
 */
/*
 * Compute the final ident_map_size (see the comment block above) from the
 * detected memory end, the "mem=" limit, the configured maximum, and the
 * kdump / stand-alone-dump restrictions.
 * NOTE(review): the function braces and, inside CONFIG_CRASH_DUMP, the
 * opening "if (OLDMEM_BASE) {" branch plus the closing "}"/#endif were lost
 * in extraction.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
	unsigned long hsa_size;
	/* Start from the detected end of online + standby physical memory. */
	ident_map_size = max_physmem_end;
	/* Clamp by "mem=" and by what this kernel build can address. */
	ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);
#ifdef CONFIG_CRASH_DUMP
	/* kdump crash kernel: never map beyond the reserved old memory. */
	ident_map_size = min(ident_map_size, OLDMEM_SIZE);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		/* Stand-alone dump: limit to the HSA size reported by SCLP. */
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
/*
 * This function clears the BSS section of the decompressed Linux kernel,
 * NOT the decompressor's.
 */
/*
 * Zero the BSS of the decompressed kernel image, which lives directly after
 * the image text/data at default_lma + image_size.
 * NOTE(review): the function's surrounding braces were lost in extraction.
 */
static void clear_bss_section(void)
	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless size has been set by kernel command line parameter.
 */
/*
 * Derive the vmalloc area size from ident_map_size (an 8th, rounded up to a
 * segment boundary) unless an explicit size was set on the command line.
 * NOTE(review): the function braces, the declaration of "size", and the
 * early "return;" after the guard were lost in extraction.
 */
static void setup_vmalloc_size(void)
	/* Respect an explicit "vmalloc=" from the kernel command line. */
	if (vmalloc_size_set)
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	/* Never shrink below a previously established minimum. */
	vmalloc_size = max(size, vmalloc_size);
194 void startup_kernel(void)
196 unsigned long random_lma
;
197 unsigned long safe_addr
;
201 store_ipl_parmblock();
202 safe_addr
= mem_safe_offset();
203 safe_addr
= read_ipl_report(safe_addr
);
205 rescue_initrd(safe_addr
);
206 sclp_early_read_info();
207 setup_boot_command_line();
208 parse_boot_command_line();
209 setup_ident_map_size(detect_memory());
210 setup_vmalloc_size();
212 random_lma
= __kaslr_offset
= 0;
213 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE
) && kaslr_enabled
) {
214 random_lma
= get_random_base(safe_addr
);
216 __kaslr_offset
= random_lma
- vmlinux
.default_lma
;
217 img
= (void *)vmlinux
.default_lma
;
218 vmlinux
.default_lma
+= __kaslr_offset
;
219 vmlinux
.entry
+= __kaslr_offset
;
220 vmlinux
.bootdata_off
+= __kaslr_offset
;
221 vmlinux
.bootdata_preserved_off
+= __kaslr_offset
;
222 vmlinux
.rela_dyn_start
+= __kaslr_offset
;
223 vmlinux
.rela_dyn_end
+= __kaslr_offset
;
224 vmlinux
.dynsym_start
+= __kaslr_offset
;
228 if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED
)) {
229 img
= decompress_kernel();
230 memmove((void *)vmlinux
.default_lma
, img
, vmlinux
.image_size
);
231 } else if (__kaslr_offset
)
232 memcpy((void *)vmlinux
.default_lma
, img
, vmlinux
.image_size
);
236 if (IS_ENABLED(CONFIG_RELOCATABLE
))
237 handle_relocs(__kaslr_offset
);
239 if (__kaslr_offset
) {
241 * Save KASLR offset for early dumps, before vmcore_info is set.
242 * Mark as uneven to distinguish from real vmcore_info pointer.
244 S390_lowcore
.vmcore_info
= __kaslr_offset
| 0x1UL
;
245 /* Clear non-relocated kernel */
246 if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED
))
247 memset(img
, 0, vmlinux
.image_size
);