// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2019
 */
#include <linux/pgtable.h>
#include <asm/mem_detect.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
#include <asm/kasan.h>
#include "compressed/decompressor.h"
#include "boot.h"

#define PRNG_MODE_TDES   1
#define PRNG_MODE_SHA512 2
#define PRNG_MODE_TRNG   3

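/*
 * Query the CPACF facilities and pick the strongest random number generator
 * available; a return value of 0 means the CPU has no PRNG at all and KASLR
 * stays disabled.
 */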
static int check_prng(void)
{
        if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
                sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
                return 0;
        }
        if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
                return PRNG_MODE_TRNG;
        if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
                return PRNG_MODE_SHA512;
        return PRNG_MODE_TDES;
}

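/*
 * Generate one random value in [0, limit - 1], using the mode reported by
 * check_prng(); returns non-zero if no usable random facility is available.
 * The result is reduced with a plain modulo operation.
 */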
static int get_random(unsigned long limit, unsigned long *value)
{
        struct prng_parm prng = {
                /* initial parameter block for tdes mode, copied from libica */
                .parm_block = {
                        0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
                        0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
                        0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
                        0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
                },
        };
        unsigned long seed, random;
        struct prno_parm prno;
        __u64 entropy[4];
        int mode, i;

        mode = check_prng();
        seed = get_tod_clock_fast();
        switch (mode) {
        case PRNG_MODE_TRNG:
                cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
                break;
        case PRNG_MODE_SHA512:
                cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
                           (u8 *) &seed, sizeof(seed));
                cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
                           sizeof(random), NULL, 0);
                break;
        case PRNG_MODE_TDES:
                /* add entropy */
                *(unsigned long *) prng.parm_block ^= seed;
                for (i = 0; i < 16; i++) {
                        cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
                                  (u8 *) entropy, (u8 *) entropy,
                                  sizeof(entropy));
                        memcpy(prng.parm_block, entropy, sizeof(entropy));
                }
                random = seed;
                cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
                          (u8 *) &random, sizeof(random));
                break;
        default:
                return -1;
        }
        *value = random % limit;
        return 0;
}

/*
 * To randomize the kernel base address we have to consider several facts:
 * 1. physical online memory might not be contiguous and may have holes.
 *    mem_detect info contains a list of online memory ranges we should
 *    consider.
 * 2. we have several memory regions which are occupied and we should not
 *    overlap and destroy them. Currently safe_addr tells us the border below
 *    which all those occupied regions are. We are safe to use anything above
 *    safe_addr.
 * 3. an upper limit might apply as well, even if memory above that limit is
 *    online. Currently those limitations are:
 *    3.1. limit set by the "mem=" kernel command line option
 *    3.2. memory reserved at the end for kasan initialization.
 * 4. the kernel base address must be aligned to THREAD_SIZE (kernel stack
 *    size), which is required for CONFIG_CHECK_STACK. Currently THREAD_SIZE
 *    is 4 pages (16 pages when the kernel is built with kasan enabled).
 * Assumptions:
 * 1. kernel size (including .bss size) and upper memory limit are page aligned.
 * 2. mem_detect memory region start is THREAD_SIZE aligned / end is PAGE_SIZE
 *    aligned (in practice memory configuration granularity on z/VM and LPAR
 *    is 1mb).
 *
 * To guarantee uniform distribution of the kernel base address among all
 * suitable addresses we generate a random value just once. For that we need
 * to build a continuous range in which every value would be suitable. We can
 * build this range by simply counting all suitable addresses (let's call them
 * positions) which would be valid as kernel base address. To count positions
 * we iterate over online memory ranges. For each range which is big enough
 * for the kernel image we count all suitable addresses we can put the kernel
 * image at, that is
 * (end - start - kernel_size) / THREAD_SIZE + 1
 * The two helpers count_valid_kernel_positions and position_to_address count
 * the positions in a given memory range and convert a position back to an
 * address.
 */
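/*
 * Illustrative example (sizes are made up, only the arithmetic matters):
 * with THREAD_SIZE = 16 KiB, an online range of 64 MiB and a kernel_size of
 * 48 MiB the formula above gives
 * (64 MiB - 48 MiB) / 16 KiB + 1 = 1025 positions, i.e. the valid base
 * addresses are start, start + 16 KiB, ..., start + 16 MiB.
 */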
static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
                                                  unsigned long _min,
                                                  unsigned long _max)
{
        unsigned long start, end, pos = 0;
        int i;

        for_each_mem_detect_block(i, &start, &end) {
                if (_min >= end)
                        continue;
                if (start >= _max)
                        break;
                start = max(_min, start);
                end = min(_max, end);
                if (end - start < kernel_size)
                        continue;
                pos += (end - start - kernel_size) / THREAD_SIZE + 1;
        }

        return pos;
}

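/*
 * Convert a 1-based position back into the base address it stands for,
 * walking the online memory ranges in the same order as
 * count_valid_kernel_positions(); returns 0 if pos is out of range.
 */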
static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
                                         unsigned long _min, unsigned long _max)
{
        unsigned long start, end;
        int i;

        for_each_mem_detect_block(i, &start, &end) {
                if (_min >= end)
                        continue;
                if (start >= _max)
                        break;
                start = max(_min, start);
                end = min(_max, end);
                if (end - start < kernel_size)
                        continue;
                if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
                        return start + (pos - 1) * THREAD_SIZE;
                pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
        }

        return 0;
}

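/*
 * Pick a random, THREAD_SIZE aligned kernel base address above safe_addr and
 * below the effective memory limit; returns 0 when KASLR has to be disabled.
 */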
unsigned long get_random_base(unsigned long safe_addr)
{
        unsigned long memory_limit = get_mem_detect_end();
        unsigned long base_pos, max_pos, kernel_size;

        memory_limit = min(memory_limit, ident_map_size);

        /*
         * Avoid putting the kernel in the end of physical memory,
         * which kasan will use for shadow memory and early pgtable
         * mapping allocations.
         */
        memory_limit -= kasan_estimate_memory_needs(memory_limit);

        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
                if (safe_addr < INITRD_START + INITRD_SIZE)
                        safe_addr = INITRD_START + INITRD_SIZE;
        }
        safe_addr = ALIGN(safe_addr, THREAD_SIZE);

        kernel_size = vmlinux.image_size + vmlinux.bss_size;
        if (safe_addr + kernel_size > memory_limit)
                return 0;

        max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
        if (!max_pos) {
                sclp_early_printk("KASLR disabled: not enough memory\n");
                return 0;
        }

        /* we need a value in the range [1, max_pos] inclusive */
        if (get_random(max_pos, &base_pos))
                return 0;

        return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
}