// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2019
 */
#include <asm/mem_detect.h>
#include <asm/pgtable.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
#include "compressed/decompressor.h"
#include "boot.h"
/*
 * Random number generator flavours reported by check_prng(),
 * ordered weakest (TDES fallback) to strongest (true hardware RNG).
 */
#define PRNG_MODE_TDES	 1
#define PRNG_MODE_SHA512 2
#define PRNG_MODE_TRNG	 3
31 static int check_prng(void)
33 if (!cpacf_query_func(CPACF_KMC
, CPACF_KMC_PRNG
)) {
34 sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
37 if (cpacf_query_func(CPACF_PRNO
, CPACF_PRNO_TRNG
))
38 return PRNG_MODE_TRNG
;
39 if (cpacf_query_func(CPACF_PRNO
, CPACF_PRNO_SHA512_DRNG_GEN
))
40 return PRNG_MODE_SHA512
;
42 return PRNG_MODE_TDES
;
45 static unsigned long get_random(unsigned long limit
)
47 struct prng_parm prng
= {
48 /* initial parameter block for tdes mode, copied from libica */
50 0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
51 0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
52 0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
53 0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
56 unsigned long seed
, random
;
57 struct prno_parm prno
;
62 seed
= get_tod_clock_fast();
65 cpacf_trng(NULL
, 0, (u8
*) &random
, sizeof(random
));
67 case PRNG_MODE_SHA512
:
68 cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED
, &prno
, NULL
, 0,
69 (u8
*) &seed
, sizeof(seed
));
70 cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN
, &prno
, (u8
*) &random
,
71 sizeof(random
), NULL
, 0);
75 *(unsigned long *) prng
.parm_block
^= seed
;
76 for (i
= 0; i
< 16; i
++) {
77 cpacf_kmc(CPACF_KMC_PRNG
, prng
.parm_block
,
78 (char *) entropy
, (char *) entropy
,
80 memcpy(prng
.parm_block
, entropy
, sizeof(entropy
));
83 cpacf_kmc(CPACF_KMC_PRNG
, prng
.parm_block
, (u8
*) &random
,
84 (u8
*) &random
, sizeof(random
));
89 return random
% limit
;
92 unsigned long get_random_base(unsigned long safe_addr
)
94 unsigned long memory_limit
= memory_end_set
? memory_end
: 0;
95 unsigned long base
, start
, end
, kernel_size
;
96 unsigned long block_sum
, offset
;
97 unsigned long kasan_needs
;
100 if (IS_ENABLED(CONFIG_BLK_DEV_INITRD
) && INITRD_START
&& INITRD_SIZE
) {
101 if (safe_addr
< INITRD_START
+ INITRD_SIZE
)
102 safe_addr
= INITRD_START
+ INITRD_SIZE
;
104 safe_addr
= ALIGN(safe_addr
, THREAD_SIZE
);
106 if ((IS_ENABLED(CONFIG_KASAN
))) {
108 * Estimate kasan memory requirements, which it will reserve
109 * at the very end of available physical memory. To estimate
110 * that, we take into account that kasan would require
111 * 1/8 of available physical memory (for shadow memory) +
112 * creating page tables for the whole memory + shadow memory
113 * region (1 + 1/8). To keep page tables estimates simple take
114 * the double of combined ptes size.
116 memory_limit
= get_mem_detect_end();
117 if (memory_end_set
&& memory_limit
> memory_end
)
118 memory_limit
= memory_end
;
120 /* for shadow memory */
121 kasan_needs
= memory_limit
/ 8;
122 /* for paging structures */
123 kasan_needs
+= (memory_limit
+ kasan_needs
) / PAGE_SIZE
/
124 _PAGE_ENTRIES
* _PAGE_TABLE_SIZE
* 2;
125 memory_limit
-= kasan_needs
;
128 kernel_size
= vmlinux
.image_size
+ vmlinux
.bss_size
;
130 for_each_mem_detect_block(i
, &start
, &end
) {
132 if (start
>= memory_limit
)
134 if (end
> memory_limit
)
137 if (end
- start
< kernel_size
)
139 block_sum
+= end
- start
- kernel_size
;
142 sclp_early_printk("KASLR disabled: not enough memory\n");
146 base
= get_random(block_sum
);
149 if (base
< safe_addr
)
151 block_sum
= offset
= 0;
152 for_each_mem_detect_block(i
, &start
, &end
) {
154 if (start
>= memory_limit
)
156 if (end
> memory_limit
)
159 if (end
- start
< kernel_size
)
161 block_sum
+= end
- start
- kernel_size
;
162 if (base
<= block_sum
) {
163 base
= start
+ base
- offset
;
164 base
= ALIGN_DOWN(base
, THREAD_SIZE
);