arch/x86/mm/kaslr.c

// SPDX-License-Identifier: GPL-2.0
/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (physical memory
 * mapping, vmalloc & vmemmap) for x86_64. This security feature mitigates
 * exploits relying on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done on PGD & P4D/PUD page table levels to increase possible addresses.
 * The physical memory mapping code was adapted to support P4D/PUD level
 * virtual addresses. On the best configuration, this implementation
 * provides about 30,000 possible virtual addresses on average for each
 * memory region. An additional low memory page is used to ensure each CPU
 * can start with a PGD aligned virtual address (for realmode).
 *
 * The order of the memory regions is not changed. The feature looks at
 * the available space for the regions based on different configuration
 * options and randomizes the base and the space between each. The size of
 * the physical memory mapping is the available physical memory.
 */
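
/*
 * Resulting layout, illustrative only and not to scale (the actual bases
 * depend on the paging mode and on the entropy drawn at boot):
 *
 *  vaddr_start                                                  vaddr_end
 *  | gap | physical mapping | gap | vmalloc | gap | vmemmap | leftover |
 */
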
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/memblock.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

#define TB_SHIFT 40

/*
 * The end address could depend on more configuration options to make the
 * highest amount of space for randomization available, but that's too hard
 * to keep straight and caused issues already.
 */
static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;

/*
 * Memory regions randomized by KASLR (except modules, which use separate
 * logic earlier during boot). The list is ordered based on virtual
 * addresses. This order is kept after randomization.
 */
static __initdata struct kaslr_memory_region {
	unsigned long *base;
	unsigned long size_tb;
} kaslr_regions[] = {
	{ &page_offset_base, 0 },
	{ &vmalloc_base, 0 },
	{ &vmemmap_base, 1 },
};
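
/*
 * Note: the size_tb values for the physical mapping and vmalloc entries
 * above are placeholders; they are filled in at boot by
 * kernel_randomize_memory() below.
 */
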
/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}
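
/*
 * For example, size_tb == 1 yields 1UL << 40 == 1 TiB of padding
 * (TB_SHIFT is 40), which matches the fixed size reserved for the
 * vmemmap region above.
 */
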
/*
 * Apply no randomization if KASLR was disabled at boot or if KASAN
 * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
 */
static inline bool kaslr_memory_enabled(void)
{
	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
	size_t i;
	unsigned long vaddr_start, vaddr;
	unsigned long rand, memory_tb;
	struct rnd_state rand_state;
	unsigned long remain_entropy;

	vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
	vaddr = vaddr_start;

	/*
	 * These BUILD_BUG_ON checks ensure the memory layout is consistent
	 * with the vaddr_start/vaddr_end variables. These checks are very
	 * limited.
	 */
	BUILD_BUG_ON(vaddr_start >= vaddr_end);
	BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

	if (!kaslr_memory_enabled())
		return;

	kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
	kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;

	/*
	 * Update the physical memory mapping size to the available memory and
	 * add padding if needed (especially for memory hotplug support).
	 */
	BUG_ON(kaslr_regions[0].base != &page_offset_base);
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;

	/* Adapt physical memory region size based on available memory */
	if (memory_tb < kaslr_regions[0].size_tb)
		kaslr_regions[0].size_tb = memory_tb;
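
	/*
	 * Worked example (hypothetical numbers): a machine with 100 GB of
	 * RAM gives DIV_ROUND_UP(100 GB, 1 TB) == 1, so with a Kconfig
	 * padding of e.g. 10 TB the physical mapping region shrinks from
	 * its compile-time maximum to 11 TB, freeing the rest of the
	 * address space as extra entropy for the gaps between regions.
	 */
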
	/* Calculate entropy available between regions */
	remain_entropy = vaddr_end - vaddr_start;
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= get_padding(&kaslr_regions[i]);
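
	/*
	 * remain_entropy now holds the slack left after reserving each
	 * region's fixed padding; the loop below distributes it as random
	 * gaps between the regions.
	 */
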
	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));

	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		unsigned long entropy;

		/*
		 * Select a random virtual address using the extra entropy
		 * available. The mask keeps the base P4D aligned (512 GB)
		 * with 5-level paging and PUD aligned (1 GB) with 4-level
		 * paging.
		 */
		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
		if (pgtable_l5_enabled())
			entropy = (rand % (entropy + 1)) & P4D_MASK;
		else
			entropy = (rand % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;

		/*
		 * Jump over the region and add a minimum padding based on
		 * the randomization alignment.
		 */
		vaddr += get_padding(&kaslr_regions[i]);
		if (pgtable_l5_enabled())
			vaddr = round_up(vaddr + 1, P4D_SIZE);
		else
			vaddr = round_up(vaddr + 1, PUD_SIZE);
		remain_entropy -= entropy;
	}
}
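
/*
 * Note on the even split above: with N regions, region i is offered
 * remain_entropy / (N - i) bytes of gap. Because the gap actually
 * consumed is subtracted from remain_entropy each iteration, every
 * region draws from the same shrinking budget and no region can starve
 * the ones after it.
 */
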
/*
 * Copy the page table entries that map low physical memory into a
 * freshly allocated trampoline page table, so that real mode CPU
 * startup still finds a valid, PGD aligned mapping after the direct
 * map base has been randomized.
 */
static void __meminit init_trampoline_pud(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	pud_t *pud_page, *pud_page_tramp;
	int i;

	pud_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	pud_page = (pud_t *) pgd_page_vaddr(*pgd);

	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud, *pud_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		pud_tramp = pud_page_tramp + pud_index(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		*pud_tramp = *pud;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}

/* Same as init_trampoline_pud(), but at the P4D level for 5-level paging. */
static void __meminit init_trampoline_p4d(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	p4d_t *p4d_page, *p4d_page_tramp;
	int i;

	p4d_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);

	for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
		p4d_t *p4d, *p4d_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		p4d_tramp = p4d_page_tramp + p4d_index(paddr);
		p4d = p4d_page + p4d_index(vaddr);
		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;

		*p4d_tramp = *p4d;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
}

/*
 * Create a PGD aligned trampoline table to allow real mode initialization
 * of additional CPUs. Consumes only one low memory page.
 */
void __meminit init_trampoline(void)
{
	if (!kaslr_memory_enabled()) {
		init_trampoline_default();
		return;
	}

	if (pgtable_l5_enabled())
		init_trampoline_p4d();
	else
		init_trampoline_pud();
}
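
/*
 * init_trampoline() is expected to run during early boot memory setup
 * (from the x86 init_mem_mapping() path), before secondary CPUs are
 * brought up through the real mode trampoline.
 */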