3 #ifdef CONFIG_RANDOMIZE_BASE
5 #include <asm/archrandom.h>
8 #include <generated/compile.h>
9 #include <linux/module.h>
10 #include <linux/uts.h>
11 #include <linux/utsname.h>
12 #include <generated/utsrelease.h>
14 /* Simplified build-specific string for starting entropy. */
15 static const char build_str
[] = UTS_RELEASE
" (" LINUX_COMPILE_BY
"@"
16 LINUX_COMPILE_HOST
") (" LINUX_COMPILER
") " UTS_VERSION
;
18 #define I8254_PORT_CONTROL 0x43
19 #define I8254_PORT_COUNTER0 0x40
20 #define I8254_CMD_READBACK 0xC0
21 #define I8254_SELECT_COUNTER0 0x02
22 #define I8254_STATUS_NOTREADY 0x40
23 static inline u16
i8254(void)
28 outb(I8254_PORT_CONTROL
,
29 I8254_CMD_READBACK
| I8254_SELECT_COUNTER0
);
30 status
= inb(I8254_PORT_COUNTER0
);
31 timer
= inb(I8254_PORT_COUNTER0
);
32 timer
|= inb(I8254_PORT_COUNTER0
) << 8;
33 } while (status
& I8254_STATUS_NOTREADY
);
/*
 * Fold the contents of @area into @hash one word at a time:
 * rotate the hash by an odd number of bits (7), then XOR in the
 * next word.  Trailing bytes smaller than sizeof(hash) are
 * ignored.  Returns the updated hash.
 */
static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	const unsigned long *ptr = (const unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}
53 /* Attempt to create a simple but unpredictable starting entropy. */
54 static unsigned long get_random_boot(void)
56 unsigned long hash
= 0;
58 hash
= rotate_xor(hash
, build_str
, sizeof(build_str
));
59 hash
= rotate_xor(hash
, real_mode
, sizeof(*real_mode
));
64 static unsigned long get_random_long(void)
67 const unsigned long mix_const
= 0x5d6008cbf3848dd3UL
;
69 const unsigned long mix_const
= 0x3f39e593UL
;
71 unsigned long raw
, random
= get_random_boot();
72 bool use_i8254
= true;
74 debug_putstr("KASLR using");
76 if (has_cpuflag(X86_FEATURE_RDRAND
)) {
77 debug_putstr(" RDRAND");
78 if (rdrand_long(&raw
)) {
84 if (has_cpuflag(X86_FEATURE_TSC
)) {
85 debug_putstr(" RDTSC");
93 debug_putstr(" i8254");
97 /* Circular multiply for better bit diffusion */
99 : "=a" (random
), "=d" (raw
)
100 : "a" (random
), "rm" (mix_const
));
103 debug_putstr("...\n");
/* A memory region described by a start address and a byte length. */
struct mem_vector {
	unsigned long start;
	unsigned long size;
};

#define MEM_AVOID_MAX 5
/* Regions the randomized kernel placement must not overlap. */
struct mem_vector mem_avoid[MEM_AVOID_MAX];
116 static bool mem_contains(struct mem_vector
*region
, struct mem_vector
*item
)
118 /* Item at least partially before region. */
119 if (item
->start
< region
->start
)
121 /* Item at least partially after region. */
122 if (item
->start
+ item
->size
> region
->start
+ region
->size
)
127 static bool mem_overlaps(struct mem_vector
*one
, struct mem_vector
*two
)
129 /* Item one is entirely before item two. */
130 if (one
->start
+ one
->size
<= two
->start
)
132 /* Item one is entirely after item two. */
133 if (one
->start
>= two
->start
+ two
->size
)
138 static void mem_avoid_init(unsigned long input
, unsigned long input_size
,
139 unsigned long output
, unsigned long output_size
)
141 u64 initrd_start
, initrd_size
;
142 u64 cmd_line
, cmd_line_size
;
143 unsigned long unsafe
, unsafe_len
;
147 * Avoid the region that is unsafe to overlap during
148 * decompression (see calculations at top of misc.c).
150 unsafe_len
= (output_size
>> 12) + 32768 + 18;
151 unsafe
= (unsigned long)input
+ input_size
- unsafe_len
;
152 mem_avoid
[0].start
= unsafe
;
153 mem_avoid
[0].size
= unsafe_len
;
156 initrd_start
= (u64
)real_mode
->ext_ramdisk_image
<< 32;
157 initrd_start
|= real_mode
->hdr
.ramdisk_image
;
158 initrd_size
= (u64
)real_mode
->ext_ramdisk_size
<< 32;
159 initrd_size
|= real_mode
->hdr
.ramdisk_size
;
160 mem_avoid
[1].start
= initrd_start
;
161 mem_avoid
[1].size
= initrd_size
;
163 /* Avoid kernel command line. */
164 cmd_line
= (u64
)real_mode
->ext_cmd_line_ptr
<< 32;
165 cmd_line
|= real_mode
->hdr
.cmd_line_ptr
;
166 /* Calculate size of cmd_line. */
167 ptr
= (char *)(unsigned long)cmd_line
;
168 for (cmd_line_size
= 0; ptr
[cmd_line_size
++]; )
170 mem_avoid
[2].start
= cmd_line
;
171 mem_avoid
[2].size
= cmd_line_size
;
173 /* Avoid heap memory. */
174 mem_avoid
[3].start
= (unsigned long)free_mem_ptr
;
175 mem_avoid
[3].size
= BOOT_HEAP_SIZE
;
177 /* Avoid stack memory. */
178 mem_avoid
[4].start
= (unsigned long)free_mem_end_ptr
;
179 mem_avoid
[4].size
= BOOT_STACK_SIZE
;
182 /* Does this memory vector overlap a known avoided area? */
183 bool mem_avoid_overlap(struct mem_vector
*img
)
187 for (i
= 0; i
< MEM_AVOID_MAX
; i
++) {
188 if (mem_overlaps(img
, &mem_avoid
[i
]))
195 unsigned long slots
[CONFIG_RANDOMIZE_BASE_MAX_OFFSET
/ CONFIG_PHYSICAL_ALIGN
];
196 unsigned long slot_max
= 0;
198 static void slots_append(unsigned long addr
)
200 /* Overflowing the slots list should be impossible. */
201 if (slot_max
>= CONFIG_RANDOMIZE_BASE_MAX_OFFSET
/
202 CONFIG_PHYSICAL_ALIGN
)
205 slots
[slot_max
++] = addr
;
208 static unsigned long slots_fetch_random(void)
210 /* Handle case of no slots stored. */
214 return slots
[get_random_long() % slot_max
];
217 static void process_e820_entry(struct e820entry
*entry
,
218 unsigned long minimum
,
219 unsigned long image_size
)
221 struct mem_vector region
, img
;
223 /* Skip non-RAM entries. */
224 if (entry
->type
!= E820_RAM
)
227 /* Ignore entries entirely above our maximum. */
228 if (entry
->addr
>= CONFIG_RANDOMIZE_BASE_MAX_OFFSET
)
231 /* Ignore entries entirely below our minimum. */
232 if (entry
->addr
+ entry
->size
< minimum
)
235 region
.start
= entry
->addr
;
236 region
.size
= entry
->size
;
238 /* Potentially raise address to minimum location. */
239 if (region
.start
< minimum
)
240 region
.start
= minimum
;
242 /* Potentially raise address to meet alignment requirements. */
243 region
.start
= ALIGN(region
.start
, CONFIG_PHYSICAL_ALIGN
);
245 /* Did we raise the address above the bounds of this e820 region? */
246 if (region
.start
> entry
->addr
+ entry
->size
)
249 /* Reduce size by any delta from the original address. */
250 region
.size
-= region
.start
- entry
->addr
;
252 /* Reduce maximum size to fit end of image within maximum limit. */
253 if (region
.start
+ region
.size
> CONFIG_RANDOMIZE_BASE_MAX_OFFSET
)
254 region
.size
= CONFIG_RANDOMIZE_BASE_MAX_OFFSET
- region
.start
;
256 /* Walk each aligned slot and check for avoided areas. */
257 for (img
.start
= region
.start
, img
.size
= image_size
;
258 mem_contains(®ion
, &img
) ;
259 img
.start
+= CONFIG_PHYSICAL_ALIGN
) {
260 if (mem_avoid_overlap(&img
))
262 slots_append(img
.start
);
266 static unsigned long find_random_addr(unsigned long minimum
,
272 /* Make sure minimum is aligned. */
273 minimum
= ALIGN(minimum
, CONFIG_PHYSICAL_ALIGN
);
275 /* Verify potential e820 positions, appending to slots list. */
276 for (i
= 0; i
< real_mode
->e820_entries
; i
++) {
277 process_e820_entry(&real_mode
->e820_map
[i
], minimum
, size
);
280 return slots_fetch_random();
/*
 * Choose a randomized physical load address for the kernel, or
 * fall back to the compile-time default (@output) when KASLR is
 * disabled on the command line or no suitable e820 region is
 * found.  The returned address is never below @output.
 */
unsigned char *choose_kernel_location(unsigned char *input,
				      unsigned long input_size,
				      unsigned char *output,
				      unsigned long output_size)
{
	unsigned long choice = (unsigned long)output;
	unsigned long random;

	if (cmdline_find_option_bool("nokaslr")) {
		debug_putstr("KASLR disabled...\n");
		goto out;
	}

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init((unsigned long)input, input_size,
		       (unsigned long)output, output_size);

	/* Walk e820 and find a random address. */
	random = find_random_addr(choice, output_size);
	if (!random) {
		debug_putstr("KASLR could not find suitable E820 region...\n");
		goto out;
	}

	/* Always enforce the minimum. */
	if (random < choice)
		goto out;

	choice = random;
out:
	return (unsigned char *)choice;
}
316 #endif /* CONFIG_RANDOMIZE_BASE */