// SPDX-License-Identifier: GPL-2.0
/*
 * kaslr.c
 *
 * This contains the routines needed to generate a reasonable level of
 * entropy to choose a randomized kernel base address offset in support
 * of Kernel Address Space Layout Randomization (KASLR). Additionally
 * handles walking the physical memory maps (and tracking memory regions
 * to avoid) in order to select a physical memory location that can
 * contain the entire properly aligned running kernel image.
 *
 */

/*
 * isspace() in linux/ctype.h is expected by next_args() to filter
 * out "space/lf/tab". boot/ctype.h conflicts with linux/ctype.h because
 * isdigit() is implemented in both of them, so disable boot/ctype.h here.
 */
#define BOOT_CTYPE_H

#include "misc.h"
#include "error.h"
#include "../string.h"
#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
#include <linux/efi.h>
#include <generated/utsrelease.h>
/* Macros used by the included decompressor code below. */
#define STATIC
#include <linux/decompress/mm.h>

#define _SETUP
#include <asm/setup.h>	/* For COMMAND_LINE_SIZE */
#undef _SETUP
extern unsigned long get_cmd_line_ptr(void);

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_boot_seed(void)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, boot_params, sizeof(*boot_params));

	return hash;
}
#define KASLR_COMPRESSED_BOOT
#include "../../lib/kaslr.c"


/* Only supporting at most 4 unusable memmap regions with kaslr */
#define MAX_MEMMAP_REGIONS	4

static bool memmap_too_large;


/*
 * Store memory limit: MAXMEM on 64-bit and KERNEL_IMAGE_SIZE on 32-bit.
 * It may be reduced by "mem=nn[KMG]" or "memmap=nn[KMG]" command line options.
 */
static u64 mem_limit;

/* Number of immovable memory regions */
static int num_immovable_mem;
enum mem_avoid_index {
	MEM_AVOID_ZO_RANGE = 0,
	MEM_AVOID_INITRD,
	MEM_AVOID_CMDLINE,
	MEM_AVOID_BOOTPARAMS,
	MEM_AVOID_MEMMAP_BEGIN,
	MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
	MEM_AVOID_MAX,
};

static struct mem_vector mem_avoid[MEM_AVOID_MAX];
static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
	/* Item one is entirely before item two. */
	if (one->start + one->size <= two->start)
		return false;

	/* Item one is entirely after item two. */
	if (one->start >= two->start + two->size)
		return false;

	return true;
}

char *skip_spaces(const char *str)
{
	while (isspace(*str))
		++str;
	return (char *)str;
}
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"
enum parse_mode {
	PARSE_MEMMAP,
	PARSE_EFI,
};

static int
parse_memmap(char *p, u64 *start, u64 *size, enum parse_mode mode)
{
	char *oldp;

	if (!p)
		return -EINVAL;

	/* We don't care about this option here */
	if (!strncmp(p, "exactmap", 8))
		return -EINVAL;

	oldp = p;
	*size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	switch (*p) {
	case '#':
	case '$':
	case '!':
		*start = memparse(p + 1, &p);
		return 0;
	case '@':
		if (mode == PARSE_MEMMAP) {
			/*
			 * memmap=nn@ss specifies usable region, should
			 * be skipped
			 */
			*size = 0;
		} else {
			u64 flags;

			/*
			 * efi_fake_mem=nn@ss:attr the attr specifies
			 * flags that might imply a soft-reservation.
			 */
			*start = memparse(p + 1, &p);
			if (p && *p == ':') {
				p++;
				if (kstrtoull(p, 0, &flags) < 0)
					*size = 0;
				else if (flags & EFI_MEMORY_SP)
					return 0;
			}
			*size = 0;
		}
		fallthrough;
	default:
		/*
		 * If no offset is given and only a size is specified,
		 * memmap=nn[KMG] behaves like mem=nn[KMG]: it limits the
		 * maximum address the system can use, and the region above
		 * that limit should be avoided.
		 */
		*start = 0;
		return 0;
	}
}
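
/*
 * Worked examples of the parsing above (editor's illustration, the values
 * are purely illustrative):
 *
 *   memmap=16M$0x30000000  -> *size = 16M, *start = 0x30000000; the caller
 *                             records [0x30000000, 0x30000000+16M) as a
 *                             region for KASLR to avoid.
 *   memmap=nn@ss           -> usable RAM; with PARSE_MEMMAP it is skipped
 *                             (*size = 0) and never becomes an avoid region.
 *   memmap=2G  (no offset) -> *start = 0, *size = 2G; treated like mem=2G,
 *                             i.e. it only lowers mem_limit.
 */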
static void mem_avoid_memmap(enum parse_mode mode, char *str)
{
	static int i;

	if (i >= MAX_MEMMAP_REGIONS)
		return;

	while (str && (i < MAX_MEMMAP_REGIONS)) {
		int rc;
		u64 start, size;
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;

		rc = parse_memmap(str, &start, &size, mode);
		if (rc < 0)
			break;
		str = k;

		if (start == 0) {
			/* Store the specified memory limit if size > 0 */
			if (size > 0 && size < mem_limit)
				mem_limit = size;

			continue;
		}

		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
		i++;
	}

	/* More than 4 memmaps, fail kaslr */
	if ((i >= MAX_MEMMAP_REGIONS) && str)
		memmap_too_large = true;
}
/* Store the number of 1GB huge pages which users specified: */
static unsigned long max_gb_huge_pages;

static void parse_gb_huge_pages(char *param, char *val)
{
	static bool gbpage_sz;
	char *p;

	if (!strcmp(param, "hugepagesz")) {
		p = val;
		if (memparse(p, &p) != PUD_SIZE) {
			gbpage_sz = false;
			return;
		}

		if (gbpage_sz)
			warn("Repeatedly set hugeTLB page size of 1G!\n");
		gbpage_sz = true;
		return;
	}

	if (!strcmp(param, "hugepages") && gbpage_sz) {
		p = val;
		max_gb_huge_pages = simple_strtoull(p, &p, 0);
	}
}
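
/*
 * For example (editor's illustration, values only illustrative), booting
 * with "hugepagesz=1G hugepages=4" makes the two branches above first
 * record the 1G page size and then set max_gb_huge_pages = 4, so the slot
 * code below will try to keep four 1GB-aligned gigabyte ranges out of the
 * KASLR candidate slots.
 */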
static void handle_mem_options(void)
{
	char *args = (char *)get_cmd_line_ptr();
	size_t len;
	char *tmp_cmdline;
	char *param, *val;
	u64 mem_size;

	if (!args)
		return;

	len = strnlen(args, COMMAND_LINE_SIZE-1);
	tmp_cmdline = malloc(len + 1);
	if (!tmp_cmdline)
		error("Failed to allocate space for tmp_cmdline");

	memcpy(tmp_cmdline, args, len);
	tmp_cmdline[len] = 0;
	args = tmp_cmdline;

	/* Chew leading spaces */
	args = skip_spaces(args);

	while (*args) {
		args = next_arg(args, &param, &val);
		/* Stop at -- */
		if (!val && strcmp(param, "--") == 0)
			break;

		if (!strcmp(param, "memmap")) {
			mem_avoid_memmap(PARSE_MEMMAP, val);
		} else if (IS_ENABLED(CONFIG_X86_64) && strstr(param, "hugepages")) {
			parse_gb_huge_pages(param, val);
		} else if (!strcmp(param, "mem")) {
			char *p = val;

			if (!strcmp(p, "nopentium"))
				continue;
			mem_size = memparse(p, &p);
			if (mem_size == 0)
				break;

			if (mem_size < mem_limit)
				mem_limit = mem_size;
		} else if (!strcmp(param, "efi_fake_mem")) {
			mem_avoid_memmap(PARSE_EFI, val);
		}
	}

	free(tmp_cmdline);
}
/*
 * In theory, KASLR can put the kernel anywhere in the range of [16M, MAXMEM)
 * on 64-bit, and [16M, KERNEL_IMAGE_SIZE) on 32-bit.
 *
 * The mem_avoid array is used to store the ranges that need to be avoided
 * when KASLR searches for an appropriate random address. We must avoid any
 * regions that are unsafe to overlap with during decompression, and other
 * things like the initrd, cmdline and boot_params. This comment seeks to
 * explain mem_avoid as clearly as possible since incorrect mem_avoid
 * memory ranges lead to really hard to debug boot failures.
 *
 * The initrd, cmdline, and boot_params are trivial to identify for
 * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
 * MEM_AVOID_BOOTPARAMS, respectively, below.
 *
 * What is less obvious is how to avoid the range of memory that is used
 * during decompression (MEM_AVOID_ZO_RANGE below). This range must cover
 * the compressed kernel (ZO) and its run space, which is used to extract
 * the uncompressed kernel (VO) and relocs.
 *
 * ZO's full run size sits against the end of the decompression buffer, so
 * we can calculate where text, data, bss, etc. of ZO are positioned more
 * precisely.
 *
 * For additional background, the decompression calculations can be found
 * in header.S, and the memory diagram is based on the one found in misc.c.
 *
 * The following conditions are already enforced by the image layouts and
 * associated code:
 *
 * - input + input_size >= output + output_size
 * - kernel_total_size <= init_size
 * - kernel_total_size <= output_size (see Note below)
 * - output + init_size >= output + output_size
 *
 * (Note that kernel_total_size and output_size have no fundamental
 * relationship, but output_size is passed to choose_random_location
 * as a maximum of the two. The diagram is showing a case where
 * kernel_total_size is larger than output_size, but this case is
 * handled by bumping output_size.)
 *
 * The above conditions can be illustrated by a diagram:
 *
 * 0   output            input            input+input_size    output+init_size
 * |-----|--------|--------|--------------|-----------|--|-------------|
 *   output+init_size-ZO_INIT_SIZE  output+output_size  output+kernel_total_size
 *
 * [output, output+init_size) is the entire memory range used for
 * extracting the compressed image.
 *
 * [output, output+kernel_total_size) is the range needed for the
 * uncompressed kernel (VO) and its run size (bss, brk, etc).
 *
 * [output, output+output_size) is VO plus relocs (i.e. the entire
 * uncompressed payload contained by ZO). This is the area of the buffer
 * written to during decompression.
 *
 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
 * range of the copied ZO and decompression code. (i.e. the range
 * covered backwards of size ZO_INIT_SIZE, starting from output+init_size.)
 *
 * [input, input+input_size) is the original copied compressed image (ZO)
 * (i.e. it does not include its run size). This range must be avoided
 * because it contains the data used for decompression.
 *
 * [input+input_size, output+init_size) is [_text, _end) for ZO. This
 * range includes ZO's heap and stack, and must be avoided since it
 * performs the decompression.
 *
 * Since the above two ranges need to be avoided and they are adjacent,
 * they can be merged, resulting in: [input, output+init_size) which
 * becomes the MEM_AVOID_ZO_RANGE below.
 */
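
/*
 * Worked example of the merged range (editor's illustration, all numbers
 * purely illustrative): with output = 16M, init_size = 48M and ZO copied to
 * input = 56M with input_size = 6M, the decompression buffer is [16M, 64M)
 * and MEM_AVOID_ZO_RANGE becomes [input, output + init_size) = [56M, 64M),
 * i.e. start = 56M and size = (16M + 48M) - 56M = 8M.
 */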
static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output)
{
	unsigned long init_size = boot_params->hdr.init_size;
	u64 initrd_start, initrd_size;
	unsigned long cmd_line, cmd_line_size;

	/*
	 * Avoid the region that is unsafe to overlap during
	 * decompression.
	 */
	mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
	mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;

	/* Avoid initrd. */
	initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
	initrd_start |= boot_params->hdr.ramdisk_image;
	initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
	initrd_size |= boot_params->hdr.ramdisk_size;
	mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
	mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
	/* No need to set mapping for initrd, it will be handled in VO. */

	/* Avoid kernel command line. */
	cmd_line = get_cmd_line_ptr();
	/* Calculate size of cmd_line. */
	cmd_line_size = strnlen((char *)cmd_line, COMMAND_LINE_SIZE-1) + 1;
	mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
	mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;

	/* Avoid boot parameters. */
	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);

	/* We don't need to set a mapping for setup_data. */

	/* Mark the memmap regions we need to avoid */
	handle_mem_options();

	/* Enumerate the immovable memory regions */
	num_immovable_mem = count_immovable_mem_regions();
}
/*
 * Does this memory vector overlap a known avoided area? If so, record the
 * overlap region with the lowest address.
 */
static bool mem_avoid_overlap(struct mem_vector *img,
			      struct mem_vector *overlap)
{
	int i;
	struct setup_data *ptr;
	u64 earliest = img->start + img->size;
	bool is_overlapping = false;

	for (i = 0; i < MEM_AVOID_MAX; i++) {
		if (mem_overlaps(img, &mem_avoid[i]) &&
		    mem_avoid[i].start < earliest) {
			*overlap = mem_avoid[i];
			earliest = overlap->start;
			is_overlapping = true;
		}
	}

	/* Avoid all entries in the setup_data linked list. */
	ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
	while (ptr) {
		struct mem_vector avoid;

		avoid.start = (unsigned long)ptr;
		avoid.size = sizeof(*ptr) + ptr->len;

		if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
			*overlap = avoid;
			earliest = overlap->start;
			is_overlapping = true;
		}

		if (ptr->type == SETUP_INDIRECT &&
		    ((struct setup_indirect *)ptr->data)->type != SETUP_INDIRECT) {
			avoid.start = ((struct setup_indirect *)ptr->data)->addr;
			avoid.size = ((struct setup_indirect *)ptr->data)->len;

			if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
				*overlap = avoid;
				earliest = overlap->start;
				is_overlapping = true;
			}
		}

		ptr = (struct setup_data *)(unsigned long)ptr->next;
	}

	return is_overlapping;
}
struct slot_area {
	u64 addr;
	unsigned long num;
};

#define MAX_SLOT_AREA 100

static struct slot_area slot_areas[MAX_SLOT_AREA];
static unsigned int slot_area_index;
static unsigned long slot_max;

static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
	struct slot_area slot_area;

	if (slot_area_index == MAX_SLOT_AREA)
		return;

	slot_area.addr = region->start;
	slot_area.num = 1 + (region->size - image_size) / CONFIG_PHYSICAL_ALIGN;

	slot_areas[slot_area_index++] = slot_area;
	slot_max += slot_area.num;
}
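
/*
 * Worked example of the slot arithmetic above (editor's illustration,
 * assuming CONFIG_PHYSICAL_ALIGN = 2M): a 34M region holding a 30M image
 * yields 1 + (34M - 30M) / 2M = 3 slots, i.e. candidate start addresses at
 * region->start, region->start + 2M and region->start + 4M.
 */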
/*
 * Skip as many 1GB huge pages as possible in the passed region
 * according to the number which users specified:
 */
static void
process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
{
	u64 pud_start, pud_end;
	unsigned long gb_huge_pages;
	struct mem_vector tmp;

	if (!IS_ENABLED(CONFIG_X86_64) || !max_gb_huge_pages) {
		store_slot_info(region, image_size);
		return;
	}

	/* Are there any 1GB pages in the region? */
	pud_start = ALIGN(region->start, PUD_SIZE);
	pud_end = ALIGN_DOWN(region->start + region->size, PUD_SIZE);

	/* No good 1GB huge pages found: */
	if (pud_start >= pud_end) {
		store_slot_info(region, image_size);
		return;
	}

	/* Check if the head part of the region is usable. */
	if (pud_start >= region->start + image_size) {
		tmp.start = region->start;
		tmp.size = pud_start - region->start;
		store_slot_info(&tmp, image_size);
	}

	/* Skip the good 1GB pages. */
	gb_huge_pages = (pud_end - pud_start) >> PUD_SHIFT;
	if (gb_huge_pages > max_gb_huge_pages) {
		pud_end = pud_start + (max_gb_huge_pages << PUD_SHIFT);
		max_gb_huge_pages = 0;
	} else {
		max_gb_huge_pages -= gb_huge_pages;
	}

	/* Check if the tail part of the region is usable. */
	if (region->start + region->size >= pud_end + image_size) {
		tmp.start = pud_end;
		tmp.size = region->start + region->size - pud_end;
		store_slot_info(&tmp, image_size);
	}
}
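
/*
 * Worked example of the splitting above (editor's illustration, values only
 * illustrative): with max_gb_huge_pages = 1 and a region
 * [0x30000000, 0x90000000), the 1GB-aligned range [0x40000000, 0x80000000)
 * is skipped, while the head [0x30000000, 0x40000000) and the tail
 * [0x80000000, 0x90000000) are still offered to store_slot_info(),
 * provided each can hold image_size.
 */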
static u64 slots_fetch_random(void)
{
	unsigned long slot;
	unsigned int i;

	/* Handle case of no slots stored. */
	if (slot_max == 0)
		return 0;

	slot = kaslr_get_random_long("Physical") % slot_max;

	for (i = 0; i < slot_area_index; i++) {
		if (slot >= slot_areas[i].num) {
			slot -= slot_areas[i].num;
			continue;
		}
		return slot_areas[i].addr + ((u64)slot * CONFIG_PHYSICAL_ALIGN);
	}

	if (i == slot_area_index)
		debug_putstr("slots_fetch_random() failed!?\n");
	return 0;
}
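
/*
 * Worked example of the weighted selection above (editor's illustration,
 * values only illustrative): with two slot areas of num = 4 and num = 6,
 * slot_max is 10. A random value slot = 7 skips the first area (7 - 4 = 3)
 * and resolves to the second area's addr + 3 * CONFIG_PHYSICAL_ALIGN, so
 * every individual slot is equally likely.
 */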
static void __process_mem_region(struct mem_vector *entry,
				 unsigned long minimum,
				 unsigned long image_size)
{
	struct mem_vector region, overlap;
	u64 region_end;

	/* Enforce minimum and memory limit. */
	region.start = max_t(u64, entry->start, minimum);
	region_end = min(entry->start + entry->size, mem_limit);

	/* Give up if slot area array is full. */
	while (slot_area_index < MAX_SLOT_AREA) {
		/* Potentially raise address to meet alignment needs. */
		region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

		/* Did we raise the address above the passed in memory entry? */
		if (region.start > region_end)
			return;

		/* Reduce size by any delta from the original address. */
		region.size = region_end - region.start;

		/* Return if region can't contain decompressed kernel */
		if (region.size < image_size)
			return;

		/* If nothing overlaps, store the region and return. */
		if (!mem_avoid_overlap(&region, &overlap)) {
			process_gb_huge_pages(&region, image_size);
			return;
		}

		/* Store beginning of region if it holds at least image_size. */
		if (overlap.start >= region.start + image_size) {
			region.size = overlap.start - region.start;
			process_gb_huge_pages(&region, image_size);
		}

		/* Clip off the overlapping region and start over. */
		region.start = overlap.start + overlap.size;
	}
}
static bool process_mem_region(struct mem_vector *region,
			       unsigned long minimum,
			       unsigned long image_size)
{
	int i;

	/*
	 * If no immovable memory found, or MEMORY_HOTREMOVE disabled,
	 * use @region directly.
	 */
	if (!num_immovable_mem) {
		__process_mem_region(region, minimum, image_size);

		if (slot_area_index == MAX_SLOT_AREA) {
			debug_putstr("Aborted e820/efi memmap scan (slot_areas full)!\n");
			return true;
		}
		return false;
	}

#if defined(CONFIG_MEMORY_HOTREMOVE) && defined(CONFIG_ACPI)
	/*
	 * If immovable memory found, filter the intersection between
	 * immovable memory and @region.
	 */
	for (i = 0; i < num_immovable_mem; i++) {
		u64 start, end, entry_end, region_end;
		struct mem_vector entry;

		if (!mem_overlaps(region, &immovable_mem[i]))
			continue;

		start = immovable_mem[i].start;
		end = start + immovable_mem[i].size;
		region_end = region->start + region->size;

		entry.start = clamp(region->start, start, end);
		entry_end = clamp(region_end, start, end);
		entry.size = entry_end - entry.start;

		__process_mem_region(&entry, minimum, image_size);

		if (slot_area_index == MAX_SLOT_AREA) {
			debug_putstr("Aborted e820/efi memmap scan when walking immovable regions(slot_areas full)!\n");
			return true;
		}
	}
#endif
	return false;
}
#ifdef CONFIG_EFI
/*
 * Returns true if we processed the EFI memmap, which we prefer over the E820
 * table if it is available.
 */
static bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
	struct efi_info *e = &boot_params->efi_info;
	bool efi_mirror_found = false;
	struct mem_vector region;
	efi_memory_desc_t *md;
	unsigned long pmap;
	char *signature;
	u32 nr_desc;
	int i;

	signature = (char *)&e->efi_loader_signature;
	if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
	    strncmp(signature, EFI64_LOADER_SIGNATURE, 4))
		return false;

#ifdef CONFIG_X86_32
	/* Can't handle data above 4GB at this time */
	if (e->efi_memmap_hi) {
		warn("EFI memmap is above 4GB, can't be handled now on x86_32. EFI should be disabled.\n");
		return false;
	}
	pmap = e->efi_memmap;
#else
	pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif

	nr_desc = e->efi_memmap_size / e->efi_memdesc_size;
	for (i = 0; i < nr_desc; i++) {
		md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			efi_mirror_found = true;
			break;
		}
	}

	for (i = 0; i < nr_desc; i++) {
		md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);

		/*
		 * Here we are more conservative in picking free memory than
		 * the EFI spec allows:
		 *
		 * According to the spec, EFI_BOOT_SERVICES_{CODE|DATA} are also
		 * free memory and thus available to place the kernel image into,
		 * but in practice there's firmware where using that memory leads
		 * to crashes.
		 *
		 * Only EFI_CONVENTIONAL_MEMORY is guaranteed to be free.
		 */
		if (md->type != EFI_CONVENTIONAL_MEMORY)
			continue;

		if (efi_soft_reserve_enabled() &&
		    (md->attribute & EFI_MEMORY_SP))
			continue;

		if (efi_mirror_found &&
		    !(md->attribute & EFI_MEMORY_MORE_RELIABLE))
			continue;

		region.start = md->phys_addr;
		region.size = md->num_pages << EFI_PAGE_SHIFT;
		if (process_mem_region(&region, minimum, image_size))
			break;
	}
	return true;
}
#else
static inline bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
	return false;
}
#endif
static void process_e820_entries(unsigned long minimum,
				 unsigned long image_size)
{
	int i;
	struct mem_vector region;
	struct boot_e820_entry *entry;

	/* Verify potential e820 positions, appending to slots list. */
	for (i = 0; i < boot_params->e820_entries; i++) {
		entry = &boot_params->e820_table[i];
		/* Skip non-RAM entries. */
		if (entry->type != E820_TYPE_RAM)
			continue;
		region.start = entry->addr;
		region.size = entry->size;
		if (process_mem_region(&region, minimum, image_size))
			break;
	}
}
static unsigned long find_random_phys_addr(unsigned long minimum,
					   unsigned long image_size)
{
	u64 phys_addr;

	/* Bail out early if it's impossible to succeed. */
	if (minimum + image_size > mem_limit)
		return 0;

	/* Check if we had too many memmaps. */
	if (memmap_too_large) {
		debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n");
		return 0;
	}

	if (!process_efi_entries(minimum, image_size))
		process_e820_entries(minimum, image_size);

	phys_addr = slots_fetch_random();

	/* Perform a final check to make sure the address is in range. */
	if (phys_addr < minimum || phys_addr + image_size > mem_limit) {
		warn("Invalid physical address chosen!\n");
		return 0;
	}

	return (unsigned long)phys_addr;
}
static unsigned long find_random_virt_addr(unsigned long minimum,
					   unsigned long image_size)
{
	unsigned long slots, random_addr;

	/*
	 * Calculate how many CONFIG_PHYSICAL_ALIGN-sized slots can hold
	 * image_size within the range from minimum to KERNEL_IMAGE_SIZE.
	 */
	slots = 1 + (KERNEL_IMAGE_SIZE - minimum - image_size) / CONFIG_PHYSICAL_ALIGN;

	random_addr = kaslr_get_random_long("Virtual") % slots;

	return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}
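
/*
 * Worked example (editor's illustration, assuming KERNEL_IMAGE_SIZE = 1G,
 * minimum = LOAD_PHYSICAL_ADDR = 16M, CONFIG_PHYSICAL_ALIGN = 2M and a 30M
 * image): slots = 1 + (1024M - 16M - 30M) / 2M = 490, so the virtual offset
 * is one of 490 possible 2M-aligned positions starting at 16M.
 */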
/*
 * Since this function examines addresses much more numerically,
 * it takes the input and output pointers as 'unsigned long'.
 */
void choose_random_location(unsigned long input,
			    unsigned long input_size,
			    unsigned long *output,
			    unsigned long output_size,
			    unsigned long *virt_addr)
{
	unsigned long random_addr, min_addr;

	if (cmdline_find_option_bool("nokaslr")) {
		warn("KASLR disabled: 'nokaslr' on cmdline.");
		return;
	}

	boot_params->hdr.loadflags |= KASLR_FLAG;

	if (IS_ENABLED(CONFIG_X86_32))
		mem_limit = KERNEL_IMAGE_SIZE;
	else
		mem_limit = MAXMEM;

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init(input, input_size, *output);

	/*
	 * Low end of the randomization range should be the
	 * smaller of 512M or the initial kernel image
	 * location:
	 */
	min_addr = min(*output, 512UL << 20);
	/* Make sure minimum is aligned. */
	min_addr = ALIGN(min_addr, CONFIG_PHYSICAL_ALIGN);

	/* Walk available memory entries to find a random address. */
	random_addr = find_random_phys_addr(min_addr, output_size);
	if (!random_addr) {
		warn("Physical KASLR disabled: no suitable memory region!");
	} else {
		/* Update the new physical address location. */
		if (*output != random_addr)
			*output = random_addr;
	}

	/* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
	if (IS_ENABLED(CONFIG_X86_64))
		random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
	*virt_addr = random_addr;
}