// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2019 Jason Yan <yanaijie@huawei.com>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
#include <asm/cacheflush.h>
#include <asm/prom.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>
#include <generated/compile.h>
#include <generated/utsrelease.h>
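
/*
 * Ranges the randomized kernel must avoid, collected from the device
 * tree and the command line before an offset is chosen.
 */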
struct regions {
	unsigned long pa_start;
	unsigned long pa_end;
	unsigned long kernel_size;
	unsigned long dtb_start;
	unsigned long dtb_end;
	unsigned long initrd_start;
	unsigned long initrd_end;
	unsigned long crash_start;
	unsigned long crash_end;
	int reserved_mem;
	int reserved_mem_addr_cells;
	int reserved_mem_size_cells;
};

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

struct regions __initdata regions;

static __init void kaslr_get_cmdline(void *fdt)
{
	int node = fdt_path_offset(fdt, "/chosen");

	early_init_dt_scan_chosen(node, "chosen", 1, boot_command_line);
}
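
/*
 * Mix @size bytes at @area into @hash: rotate the accumulator left by
 * an odd bit count (sizeof(hash) * 8 - 7) and XOR in the next word.
 * A quick mixing function for gathering entropy, not a cryptographic
 * hash.
 */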
static unsigned long __init rotate_xor(unsigned long hash, const void *area,
				       size_t size)
{
	size_t i;
	const unsigned long *ptr = area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

/*
 * Attempt to create a simple starting entropy. This can make it different
 * for every build, but it is still not enough: stronger entropy should
 * be added to make it change on every boot.
 */
static unsigned long __init get_boot_seed(void *fdt)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));

	return hash;
}
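
/*
 * Fetch /chosen/kaslr-seed from the FDT, then zero the property so the
 * seed is not left behind in the device tree for later consumers.
 */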
static __init u64 get_kaslr_seed(void *fdt)
{
	int node, len;
	fdt64_t *prop;
	u64 ret;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(u64))
		return 0;

	ret = fdt64_to_cpu(*prop);
	*prop = 0;
	return ret;
}

static __init bool regions_overlap(u32 s1, u32 e1, u32 s2, u32 e2)
{
	return e1 >= s2 && e2 >= s1;
}
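
/*
 * Check a candidate range against firmware reservations: both the
 * /memreserve/ map and any static allocations under /reserved-memory,
 * decoded with the cell sizes recorded in regions.
 */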
static __init bool overlaps_reserved_region(const void *fdt, u32 start,
					    u32 end)
{
	int subnode, len, i;
	u64 base, size;

	/* check for overlap with /memreserve/ entries */
	for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
		if (fdt_get_mem_rsv(fdt, i, &base, &size) < 0)
			continue;
		if (regions_overlap(start, end, base, base + size))
			return true;
	}

	if (regions.reserved_mem < 0)
		return false;

	/* check for overlap with static reservations in /reserved-memory */
	for (subnode = fdt_first_subnode(fdt, regions.reserved_mem);
	     subnode >= 0;
	     subnode = fdt_next_subnode(fdt, subnode)) {
		const fdt32_t *reg;
		u64 rsv_end;

		len = 0;
		reg = fdt_getprop(fdt, subnode, "reg", &len);
		while (len >= (regions.reserved_mem_addr_cells +
			       regions.reserved_mem_size_cells)) {
			base = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_addr_cells == 2)
				base = (base << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_addr_cells;
			len -= 4 * regions.reserved_mem_addr_cells;

			size = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_size_cells == 2)
				size = (size << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_size_cells;
			len -= 4 * regions.reserved_mem_size_cells;

			if (base >= regions.pa_end)
				continue;

			rsv_end = min(base + size, (u64)U32_MAX);

			if (regions_overlap(start, end, base, rsv_end))
				return true;
		}
	}
	return false;
}
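
/*
 * True if [start, end] hits anything the new kernel must not touch:
 * the current kernel image, the DTB, the initrd, the crashkernel
 * range, or a firmware reservation.
 */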
static __init bool overlaps_region(const void *fdt, u32 start,
				   u32 end)
{
	if (regions_overlap(start, end, __pa(_stext), __pa(_end)))
		return true;

	if (regions_overlap(start, end, regions.dtb_start,
			    regions.dtb_end))
		return true;

	if (regions_overlap(start, end, regions.initrd_start,
			    regions.initrd_end))
		return true;

	if (regions_overlap(start, end, regions.crash_start,
			    regions.crash_end))
		return true;

	return overlaps_reserved_region(fdt, start, end);
}
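
/*
 * Record the crashkernel= range so the randomized kernel stays clear
 * of memory set aside for the kdump kernel.
 */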
static void __init get_crash_kernel(void *fdt, unsigned long size)
{
#ifdef CONFIG_CRASH_CORE
	unsigned long long crash_size, crash_base;
	int ret;

	ret = parse_crashkernel(boot_command_line, size, &crash_size,
				&crash_base);
	if (ret != 0 || crash_size == 0)
		return;
	if (crash_base == 0)
		crash_base = KDUMP_KERNELBASE;

	regions.crash_start = (unsigned long)crash_base;
	regions.crash_end = (unsigned long)(crash_base + crash_size);

	pr_debug("crash_base=0x%llx crash_size=0x%llx\n", crash_base, crash_size);
#endif
}

static void __init get_initrd_range(void *fdt)
{
	u64 start, end;
	int node, len;
	const __be32 *prop;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return;

	prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
	if (!prop)
		return;
	start = of_read_number(prop, len / 4);

	prop = fdt_getprop(fdt, node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_number(prop, len / 4);

	regions.initrd_start = (unsigned long)start;
	regions.initrd_end = (unsigned long)end;

	pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
}
static __init unsigned long get_usable_address(const void *fdt,
					       unsigned long start,
					       unsigned long offset)
{
	unsigned long pa;
	unsigned long pa_end;

	for (pa = offset; (long)pa > (long)start; pa -= SZ_16K) {
		pa_end = pa + regions.kernel_size;
		if (overlaps_region(fdt, pa, pa_end))
			continue;

		return pa;
	}
	return 0;
}

static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
				  int *size_cells)
{
	const int *prop;
	int len;

	/*
	 * Retrieve the #address-cells and #size-cells properties
	 * from the 'node', or use the default if not provided.
	 */
	*addr_cells = *size_cells = 1;

	prop = fdt_getprop(fdt, node, "#address-cells", &len);
	if (len == 4)
		*addr_cells = fdt32_to_cpu(*prop);
	prop = fdt_getprop(fdt, node, "#size-cells", &len);
	if (len == 4)
		*size_cells = fdt32_to_cpu(*prop);
}
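
/*
 * Try the chosen 64M slot first; if no usable address is found there,
 * fall back through lower-indexed slots until one works or index goes
 * negative. The returned value is an offset from memstart_addr.
 */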
static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
					       unsigned long offset)
{
	unsigned long koffset = 0;
	unsigned long start;

	while ((long)index >= 0) {
		offset = memstart_addr + index * SZ_64M + offset;
		start = memstart_addr + index * SZ_64M;
		koffset = get_usable_address(dt_ptr, start, offset);
		if (koffset)
			break;
		index--;
	}

	if (koffset != 0)
		koffset -= memstart_addr;

	return koffset;
}

static inline __init bool kaslr_disabled(void)
{
	return strstr(boot_command_line, "nokaslr") != NULL;
}
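
/*
 * Pick the random offset: mix entropy from the build string, the FDT,
 * the timebase and any kaslr-seed property, then use the low 8 bits of
 * the result to choose a 64M slot in the linear mapping and the rest to
 * pick a 16K-aligned offset inside that slot. For example, with a 512M
 * linear map there are 8 candidate slots, so index = (random & 0xFF) % 8.
 */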
static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
						  unsigned long kernel_sz)
{
	unsigned long offset, random;
	unsigned long ram, linear_sz;
	u64 seed;
	unsigned long index;

	kaslr_get_cmdline(dt_ptr);
	if (kaslr_disabled())
		return 0;

	random = get_boot_seed(dt_ptr);

	seed = get_tb() << 32;
	seed ^= get_tb();
	random = rotate_xor(random, &seed, sizeof(seed));

	/*
	 * Retrieve (and wipe) the seed from the FDT
	 */
	seed = get_kaslr_seed(dt_ptr);
	if (seed)
		random = rotate_xor(random, &seed, sizeof(seed));
	else
		pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");

	ram = min_t(phys_addr_t, __max_low_memory, size);
	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true);
	linear_sz = min_t(unsigned long, ram, SZ_512M);

	/* If the linear size is smaller than 64M, do not randomize */
	if (linear_sz < SZ_64M)
		return 0;

	/* check for a reserved-memory node and record its cell sizes */
	regions.reserved_mem = fdt_path_offset(dt_ptr, "/reserved-memory");
	if (regions.reserved_mem >= 0)
		get_cell_sizes(dt_ptr, regions.reserved_mem,
			       &regions.reserved_mem_addr_cells,
			       &regions.reserved_mem_size_cells);

	regions.pa_start = memstart_addr;
	regions.pa_end = memstart_addr + linear_sz;
	regions.dtb_start = __pa(dt_ptr);
	regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
	regions.kernel_size = kernel_sz;

	get_initrd_range(dt_ptr);
	get_crash_kernel(dt_ptr, ram);

	/*
	 * Decide which 64M we want to start
	 * Only use the low 8 bits of the random seed
	 */
	index = random & 0xFF;
	index %= linear_sz / SZ_64M;

	/* Decide offset inside 64M */
	offset = random % (SZ_64M - kernel_sz);
	offset = round_down(offset, SZ_16K);

	return kaslr_legal_offset(dt_ptr, index, offset);
}

/*
 * Decide whether we need to relocate the kernel to a random offset,
 * and if so, copy it there and jump to the new location.
 * void *dt_ptr - address of the device tree
 * phys_addr_t size - size of the first memory block
 */
notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
{
	unsigned long tlb_virt;
	phys_addr_t tlb_phys;
	unsigned long offset;
	unsigned long kernel_sz;

	kernel_sz = (unsigned long)_end - (unsigned long)_stext;

	offset = kaslr_choose_location(dt_ptr, size, kernel_sz);
	if (offset == 0)
		return;

	kernstart_virt_addr += offset;
	kernstart_addr += offset;

	is_second_reloc = 1;

	if (offset >= SZ_64M) {
		tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
		tlb_phys = round_down(kernstart_addr, SZ_64M);

		/* Create kernel map to relocate in */
		create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
	}

	/* Copy the kernel to its new location and run */
	memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
	flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);

	reloc_kernel_entry(dt_ptr, kernstart_virt_addr);
}
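
/*
 * Runs later in boot, once the final mapping is up: if the kernel was
 * relocated, scrub the stale copy left at KERNELBASE.
 */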
void __init kaslr_late_init(void)
{
	/* If randomized, clear the original kernel */
	if (kernstart_virt_addr != KERNELBASE) {
		unsigned long kernel_sz;

		kernel_sz = (unsigned long)_end - kernstart_virt_addr;
		memzero_explicit((void *)KERNELBASE, kernel_sz);
	}
}