// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uv.h>

unsigned long kasan_vmax;
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;
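
/*
 * Shorthand for converting a kernel address to its shadow address.
 * kasan_mem_to_shadow() maps every 8 bytes of kernel memory to one shadow
 * byte: shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET.
 * The shadow for [0, memsize) therefore occupies memsize / 8 bytes starting
 * at __sha(0).
 */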
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

static void __init kasan_early_panic(const char *reason)
{
        sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
        sclp_early_printk(reason);
        disabled_wait();
}
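
/*
 * Early boot allocators. Both hand out memory from the top of online memory
 * downwards: segment_pos backs 1 MB segment mappings (EDAT), pgalloc_pos
 * backs page tables and 4 KB shadow pages. Falling below the respective low
 * watermark this early in boot is fatal.
 */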

static void * __init kasan_early_alloc_segment(void)
{
        segment_pos -= _SEGMENT_SIZE;

        if (segment_pos < segment_low)
                kasan_early_panic("out of memory during initialisation\n");

        return (void *)segment_pos;
}

static void * __init kasan_early_alloc_pages(unsigned int order)
{
        pgalloc_pos -= (PAGE_SIZE << order);

        if (pgalloc_pos < pgalloc_low)
                kasan_early_panic("out of memory during initialisation\n");

        return (void *)pgalloc_pos;
}

static void * __init kasan_early_crst_alloc(unsigned long val)
{
        unsigned long *table;

        table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
        if (table)
                crst_table_init(table, val);
        return table;
}

static pte_t * __init kasan_early_pte_alloc(void)
{
        static void *pte_leftover;
        pte_t *pte;

        BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

        if (!pte_leftover) {
                pte_leftover = kasan_early_alloc_pages(0);
                pte = pte_leftover + _PAGE_TABLE_SIZE;
        } else {
                pte = pte_leftover;
                pte_leftover = NULL;
        }

        memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
        return pte;
}
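
/*
 * kasan_early_pgtable_populate() walks [address, end) and fills the early
 * page tables according to the requested mode:
 *  - POPULATE_ONE2ONE:     identity-map the range (virtual == physical)
 *  - POPULATE_MAP:         back the range with freshly allocated, zeroed pages
 *  - POPULATE_ZERO_SHADOW: map the range read-only to the shared kasan zero
 *                          shadow page, for memory that is not tracked
 *  - POPULATE_SHALLOW:     only allocate the top-level region tables and leave
 *                          the lower levels to be populated later (used for
 *                          the vmalloc/modules shadow with KASAN_VMALLOC)
 */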

enum populate_mode {
        POPULATE_ONE2ONE,
        POPULATE_MAP,
        POPULATE_ZERO_SHADOW,
        POPULATE_SHALLOW
};

static void __init kasan_early_pgtable_populate(unsigned long address,
                                                unsigned long end,
                                                enum populate_mode mode)
{
        unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
        pgd_t *pg_dir;
        p4d_t *p4_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;

        pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
        if (!has_nx)
                pgt_prot_zero &= ~_PAGE_NOEXEC;
        pgt_prot = pgprot_val(PAGE_KERNEL);
        sgt_prot = pgprot_val(SEGMENT_KERNEL);
        if (!has_nx || mode == POPULATE_ONE2ONE) {
                pgt_prot &= ~_PAGE_NOEXEC;
                sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
        }

        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, PGDIR_SIZE) &&
                            end - address >= PGDIR_SIZE) {
                                pgd_populate(&init_mm, pg_dir,
                                             kasan_early_shadow_p4d);
                                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                                continue;
                        }
                        p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
                        pgd_populate(&init_mm, pg_dir, p4_dir);
                }

                if (mode == POPULATE_SHALLOW) {
                        address = (address + P4D_SIZE) & P4D_MASK;
                        continue;
                }

                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, P4D_SIZE) &&
                            end - address >= P4D_SIZE) {
                                p4d_populate(&init_mm, p4_dir,
                                             kasan_early_shadow_pud);
                                address = (address + P4D_SIZE) & P4D_MASK;
                                continue;
                        }
                        pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
                        p4d_populate(&init_mm, p4_dir, pu_dir);
                }

                pu_dir = pud_offset(p4_dir, address);
                if (pud_none(*pu_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, PUD_SIZE) &&
                            end - address >= PUD_SIZE) {
                                pud_populate(&init_mm, pu_dir,
                                             kasan_early_shadow_pmd);
                                address = (address + PUD_SIZE) & PUD_MASK;
                                continue;
                        }
                        pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }

                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, PMD_SIZE) &&
                            end - address >= PMD_SIZE) {
                                pmd_populate(&init_mm, pm_dir,
                                             kasan_early_shadow_pte);
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }
                        /* the first megabyte of 1:1 is mapped with 4k pages */
                        if (has_edat && address && end - address >= PMD_SIZE &&
                            mode != POPULATE_ZERO_SHADOW) {
                                void *page;

                                if (mode == POPULATE_ONE2ONE) {
                                        page = (void *)address;
                                } else {
                                        page = kasan_early_alloc_segment();
                                        memset(page, 0, _SEGMENT_SIZE);
                                }
                                pmd_val(*pm_dir) = __pa(page) | sgt_prot;
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }

                        pt_dir = kasan_early_pte_alloc();
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                } else if (pmd_large(*pm_dir)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                if (pte_none(*pt_dir)) {
                        void *page;

                        switch (mode) {
                        case POPULATE_ONE2ONE:
                                page = (void *)address;
                                pte_val(*pt_dir) = __pa(page) | pgt_prot;
                                break;
                        case POPULATE_MAP:
                                page = kasan_early_alloc_pages(0);
                                memset(page, 0, PAGE_SIZE);
                                pte_val(*pt_dir) = __pa(page) | pgt_prot;
                                break;
                        case POPULATE_ZERO_SHADOW:
                                page = kasan_early_shadow_page;
                                pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
                                break;
                        case POPULATE_SHALLOW:
                                /* should never happen */
                                break;
                        }
                }
                address += PAGE_SIZE;
        }
}
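
/*
 * Install the given top-level table as kernel and user address space control
 * element (ASCE) and load it into control registers 1, 7 and 13 (primary,
 * secondary and home space), so the new tables take effect once DAT is on.
 */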

static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
        unsigned long asce_bits;

        asce_bits = asce_type | _ASCE_TABLE_LENGTH;
        S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
        S390_lowcore.user_asce = S390_lowcore.kernel_asce;

        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
}
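
/*
 * Turn on dynamic address translation (DAT) by loading a PSW mask with the
 * DAT bit set and the address space control set to home space.
 */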

static void __init kasan_enable_dat(void)
{
        psw_t psw;

        psw.mask = __extract_psw();
        psw_bits(psw).dat = 1;
        psw_bits(psw).as = PSW_BITS_AS_HOME;
        __load_psw_mask(psw.mask);
}
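
/*
 * Probe the CPU facilities used below: facility 8 (EDAT1) allows 1 MB segment
 * mappings, facility 130 (instruction-execution protection) provides the NX
 * semantics behind _PAGE_NOEXEC. The matching control register 0 bits are
 * switched on when a facility is present.
 */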

static void __init kasan_early_detect_facilities(void)
{
        if (test_facility(8)) {
                has_edat = true;
                __ctl_set_bit(0, 23);
        }
        if (!noexec_disabled && test_facility(130)) {
                has_nx = true;
                __ctl_set_bit(0, 20);
        }
}
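
/*
 * On protected virtualization hosts the maximum secure storage address limits
 * how high the kernel address space may reach; kasan_early_init() caps
 * kasan_vmax to uv_info.max_sec_stor_addr in that case.
 */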

static bool __init has_uv_sec_stor_limit(void)
{
        /*
         * keep these conditions in line with setup_uv()
         */
        if (!is_prot_virt_host())
                return false;

        if (is_prot_virt_guest())
                return false;

        if (!test_facility(158))
                return false;

        return !!uv_info.max_sec_stor_addr;
}
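
/*
 * Build the early 1:1 mapping of physical memory and the kasan shadow for the
 * whole kernel address space, switch to the resulting page tables and enable
 * DAT. The final page tables built later inherit the shadow region via
 * kasan_copy_shadow_mapping().
 */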

void __init kasan_early_init(void)
{
        unsigned long untracked_mem_end;
        unsigned long shadow_alloc_size;
        unsigned long vmax_unlimited;
        unsigned long initrd_end;
        unsigned long memsize;
        unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
        pte_t pte_z;
        pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
        pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
        p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

        kasan_early_detect_facilities();
        if (!has_nx)
                pgt_prot &= ~_PAGE_NOEXEC;
        pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

        memsize = get_mem_detect_end();
        if (!memsize)
                kasan_early_panic("cannot detect physical memory size\n");
        /*
         * Kasan currently supports standby memory but only if it follows
         * online memory (default allocation), i.e. no memory holes.
         * - memsize represents end of online memory
         * - ident_map_size represents online + standby and memory limits
         *   accounted.
         * Kasan maps "memsize" right away.
         * [0, memsize]                 - as identity mapping
         * [__sha(0), __sha(memsize)]   - shadow memory for identity mapping
         * The rest [memsize, ident_map_size] if memsize < ident_map_size
         * could be mapped/unmapped dynamically later during memory hotplug.
         */
        memsize = min(memsize, ident_map_size);

        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
        crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY);
        untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE;
        if (has_uv_sec_stor_limit())
                kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr);

        /* init kasan zero shadow */
        crst_table_init((unsigned long *)kasan_early_shadow_p4d,
                        p4d_val(p4d_z));
        crst_table_init((unsigned long *)kasan_early_shadow_pud,
                        pud_val(pud_z));
        crst_table_init((unsigned long *)kasan_early_shadow_pmd,
                        pmd_val(pmd_z));
        memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
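
        /*
         * With generic KASAN every 8 bytes of memory are covered by one
         * shadow byte, so the shadow for the identity mapping needs
         * memsize / 8 bytes (e.g. 1 GB of shadow for 8 GB of online memory).
         * It is allocated from the top of memory downwards and must not run
         * into the kernel image or the initrd, which is what pgalloc_low
         * guards against.
         */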

        shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
        pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
                initrd_end =
                    round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
                pgalloc_low = max(pgalloc_low, initrd_end);
        }

        if (pgalloc_low + shadow_alloc_size > memsize)
                kasan_early_panic("out of memory during initialisation\n");

        if (has_edat) {
                segment_pos = round_down(memsize, _SEGMENT_SIZE);
                segment_low = segment_pos - shadow_alloc_size;
                pgalloc_pos = segment_low;
        } else {
                pgalloc_pos = memsize;
        }
        init_mm.pgd = early_pg_dir;
        /*
         * Current memory layout:
         * +- 0 -------------+     +- shadow start -+
         * | 1:1 ram mapping |    /| 1/8 ram        |
         * |                 |   / |                |
         * +- end of ram ----+  /  +----------------+
         * | ... gap ...     | /   |                |
         * |                 |/    |     kasan      |
         * +- shadow start --+     |     zero       |
         * | 1/8 addr space  |     |     page       |
         * +- shadow end    -+     |    mapping     |
         * | ... gap ...     |\    |  (untracked)   |
         * +- vmalloc area  -+ \   |                |
         * | vmalloc_size    |  \  |                |
         * +- modules vaddr -+   \ +----------------+
         * | 2Gb             |    \|    unmapped    | allocated per module
         * +-----------------+     +- shadow end ---+
         *
         * Current memory layout (KASAN_VMALLOC):
         * +- 0 -------------+     +- shadow start -+
         * | 1:1 ram mapping |    /| 1/8 ram        |
         * |                 |   / |                |
         * +- end of ram ----+  /  +----------------+
         * | ... gap ...     | /   |     kasan      |
         * |                 |/    |     zero       |
         * +- shadow start --+     |     page       |
         * | 1/8 addr space  |     |    mapping     |
         * +- shadow end    -+     |  (untracked)   |
         * | ... gap ...     |\    |                |
         * +- vmalloc area  -+ \   +- vmalloc area -+
         * | vmalloc_size    |  \  |shallow populate|
         * +- modules vaddr -+   \ +- modules area -+
         * | 2Gb             |    \|shallow populate|
         * +-----------------+     +- shadow end ---+
         */
        /* populate kasan shadow (for identity mapping and zero page mapping) */
        kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP);
        if (IS_ENABLED(CONFIG_MODULES))
                untracked_mem_end = kasan_vmax - MODULES_LEN;
        if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
                untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN;
                /* shallowly populate kasan shadow for vmalloc and modules */
                kasan_early_pgtable_populate(__sha(untracked_mem_end), __sha(kasan_vmax),
                                             POPULATE_SHALLOW);
        }
        /* populate kasan shadow for untracked memory */
        kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_mem_end),
                                     POPULATE_ZERO_SHADOW);
        kasan_early_pgtable_populate(__sha(kasan_vmax), __sha(vmax_unlimited),
                                     POPULATE_ZERO_SHADOW);
        /* memory allocated for identity mapping structs will be freed later */
        pgalloc_freeable = pgalloc_pos;
        /* populate identity mapping */
        kasan_early_pgtable_populate(0, memsize, POPULATE_ONE2ONE);
        kasan_set_pgd(early_pg_dir, _ASCE_TYPE_REGION2);
        kasan_enable_dat();
        /* enable kasan */
        init_task.kasan_depth = 0;
        memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
        sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow_mapping(void)
{
        /*
         * At this point we are still running on the early page tables set up
         * in early_pg_dir, while swapper_pg_dir has just been initialized
         * with the identity mapping. Carry over the shadow memory region
         * from early_pg_dir to swapper_pg_dir.
         */

        pgd_t *pg_dir_src;
        pgd_t *pg_dir_dst;
        p4d_t *p4_dir_src;
        p4d_t *p4_dir_dst;

        pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
        pg_dir_dst = pgd_offset_raw(init_mm.pgd, KASAN_SHADOW_START);
        p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
        p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
        memcpy(p4_dir_dst, p4_dir_src,
               (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
}
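
/*
 * The page tables and pages allocated for the early identity mapping occupy
 * [pgalloc_pos, pgalloc_freeable) and are only needed until the final page
 * tables take over; hand that range back to memblock.
 */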

void __init kasan_free_early_identity(void)
{
        memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}