/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (physical memory
 * mapping, vmalloc & vmemmap) for x86_64. This security feature mitigates
 * exploits relying on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done on PGD & PUD page table levels to increase possible addresses. The
 * physical memory mapping code was adapted to support PUD level virtual
 * addresses. This implementation, on the best configuration, provides 30,000
 * possible virtual addresses on average for each memory region. An additional
 * low memory page is used to ensure each CPU can start with a PGD aligned
 * virtual address (for realmode).
 *
 * The order of each memory region is not changed. The feature looks at
 * the available space for the regions based on different configuration
 * options and randomizes the base and space between each. The size of the
 * physical memory mapping is the available physical memory.
 */
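
/*
 * Rough illustration (an estimate added for clarity, not a figure taken from
 * the code): region bases are randomized at PUD granularity, i.e. in 1GB
 * steps on x86_64, so a region that can slide within roughly 30TB of spare
 * virtual address space has about 30TB / 1GB ~= 30000 candidate base
 * addresses, which is the scale of the number quoted above.
 */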

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

#define TB_SHIFT 40
/*
 * Virtual address start and end range for randomization. The end changes
 * based on configuration to have the highest amount of space for
 * randomization. It increases the possible random position for each
 * randomized region.
 *
 * You need to add an #ifdef entry if you introduce a new memory region
 * compatible with KASLR. Your entry must be in logical order with memory
 * layout. For example, ESPFIX is before EFI because its virtual address is
 * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory()
 * to ensure that this order is correct and won't be changed. A hypothetical
 * sketch of such an entry follows the #ifdef chain below.
 */
static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
#if defined(CONFIG_X86_ESPFIX64)
static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
#elif defined(CONFIG_EFI)
static const unsigned long vaddr_end = EFI_VA_END;
#else
static const unsigned long vaddr_end = __START_KERNEL_map;
#endif
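
/*
 * Hypothetical sketch only (CONFIG_FOO and FOO_BASE_ADDR are made-up names,
 * not real kernel symbols): a new fixed region starting below ESPFIX would
 * become the first branch of the chain above, so that vaddr_end stays the
 * lowest fixed address that follows the randomized regions:
 *
 *	#if defined(CONFIG_FOO)
 *	static const unsigned long vaddr_end = FOO_BASE_ADDR;
 *	#elif defined(CONFIG_X86_ESPFIX64)
 *	...
 *	#endif
 *
 * together with a matching ordering check in kernel_randomize_memory(), e.g.
 * BUILD_BUG_ON(IS_ENABLED(CONFIG_FOO) && vaddr_end >= ESPFIX_BASE_ADDR).
 */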

/* Default values */
unsigned long page_offset_base = __PAGE_OFFSET_BASE;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base = __VMALLOC_BASE;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base = __VMEMMAP_BASE;
EXPORT_SYMBOL(vmemmap_base);
/*
 * Memory regions randomized by KASLR (except modules that use a separate logic
 * earlier during boot). The list is ordered based on virtual addresses. This
 * order is kept after randomization.
 */
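/*
 * Each entry's *base pointer is rewritten in place by
 * kernel_randomize_memory(); size_tb is the space reserved for the region
 * in terabytes, converted to bytes via TB_SHIFT in get_padding().
 */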
static __initdata struct kaslr_memory_region {
	unsigned long *base;
	unsigned long size_tb;
} kaslr_regions[] = {
	{ &page_offset_base, 64/* Maximum */ },
	{ &vmalloc_base, VMALLOC_SIZE_TB },
	{ &vmemmap_base, 1 },
};

/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}

/*
 * Apply no randomization if KASLR was disabled at boot or if KASAN
 * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
 */
static inline bool kaslr_memory_enabled(void)
{
	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
	size_t i;
	unsigned long vaddr = vaddr_start;
	unsigned long rand, memory_tb;
	struct rnd_state rand_state;
	unsigned long remain_entropy;

	/*
	 * All these BUILD_BUG_ON checks ensure the memory layout is
	 * consistent with the vaddr_start/vaddr_end variables.
	 */
	BUILD_BUG_ON(vaddr_start >= vaddr_end);
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
		     vaddr_end >= EFI_VA_END);
	BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
		      IS_ENABLED(CONFIG_EFI)) &&
		     vaddr_end >= __START_KERNEL_map);
	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

	if (!kaslr_memory_enabled())
		return;

	/*
	 * Update the physical memory mapping to cover the available memory,
	 * and add padding if needed (especially for memory hotplug support).
	 */
	BUG_ON(kaslr_regions[0].base != &page_offset_base);
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;

	/* Adapt physical memory region size based on available memory */
	if (memory_tb < kaslr_regions[0].size_tb)
		kaslr_regions[0].size_tb = memory_tb;

	/* Calculate entropy available between regions */
	remain_entropy = vaddr_end - vaddr_start;
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= get_padding(&kaslr_regions[i]);
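
	/*
	 * Worked example with made-up numbers: if 12TB of entropy remain and
	 * three regions are left, the first region may be shifted by up to
	 * 12TB / 3 = 4TB. Whatever offset it does not consume stays in
	 * remain_entropy and is redistributed over the remaining regions on
	 * the next iterations of the loop below.
	 */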

	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));

	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		unsigned long entropy;

		/*
		 * Select a random virtual address using the extra entropy
		 * available.
		 */
		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
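		/*
		 * The modulo below keeps the random offset within this
		 * region's share of the remaining entropy, and the PUD_MASK
		 * truncation rounds it down to a PUD (1GB) boundary so the
		 * region base stays PUD aligned.
		 */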
		entropy = (rand % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;

		/*
		 * Jump the region and add a minimum padding based on
		 * randomization alignment.
		 */
		vaddr += get_padding(&kaslr_regions[i]);
		vaddr = round_up(vaddr + 1, PUD_SIZE);
		remain_entropy -= entropy;
	}
}

/*
 * Create PGD aligned trampoline table to allow real mode initialization
 * of additional CPUs. Consumes only 1 low memory page.
 */
void __meminit init_trampoline(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	pud_t *pud_page, *pud_page_tramp;
	int i;

	if (!kaslr_memory_enabled()) {
		init_trampoline_default();
		return;
	}

	pud_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	pud_page = (pud_t *) pgd_page_vaddr(*pgd);
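
	/*
	 * Copy the direct-mapping PUD entries that cover low physical memory
	 * into the dedicated trampoline PUD page, indexing the copies by
	 * physical address. This gives the real-mode trampoline a PGD aligned
	 * table to start from even though the randomized direct mapping is
	 * only guaranteed to be PUD aligned.
	 */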
	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud, *pud_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		pud_tramp = pud_page_tramp + pud_index(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		*pud_tramp = *pud;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}