// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>

static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;

#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait();
}
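
/*
 * Both early allocators hand out memory from the top of detected RAM and
 * grow downwards: segment_pos provides _SEGMENT_SIZE blocks used to back
 * shadow memory with large pages when EDAT is available, pgalloc_pos
 * provides pages for page tables and 4k shadow pages. Running into the
 * respective low watermark is fatal this early in boot.
 */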
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)segment_pos;
}

static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)pgalloc_pos;
}

static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}
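
/*
 * s390 page tables are 2k, half a page, so every allocated 4k page is
 * split in two: the upper half is handed out first and the lower half is
 * remembered in pte_leftover for the next call.
 */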
static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}

	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}
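
/*
 * Modes for the early page table walker below:
 * POPULATE_ONE2ONE     - identity-map addresses to themselves
 * POPULATE_MAP         - back the shadow with freshly allocated, zeroed memory
 * POPULATE_ZERO_SHADOW - map everything to the shared read-only zero shadow page
 * POPULATE_SHALLOW     - build upper-level page tables only, no leaf entries
 */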
enum populate_mode {
	POPULATE_ONE2ONE,
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW,
	POPULATE_SHALLOW
};

static void __init kasan_early_vmemmap_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
	if (!has_nx)
		pgt_prot_zero &= ~_PAGE_NOEXEC;
	pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
	sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);
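
	/*
	 * Walk the range level by level. In POPULATE_ZERO_SHADOW mode a
	 * region that is fully aligned and covered at some level is wired
	 * to the shared zero shadow tables; otherwise a real table is
	 * allocated and the walk descends one level further.
	 */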
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
					     kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
		    mode == POPULATE_SHALLOW) {
			address = (address + P4D_SIZE) & P4D_MASK;
			continue;
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
					     kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		if (!IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
		    mode == POPULATE_SHALLOW) {
			address = (address + PUD_SIZE) & PUD_MASK;
			continue;
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
					     kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				pmd_populate(&init_mm, pm_dir,
					     kasan_early_shadow_pte);
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			/* the first megabyte of 1:1 is mapped with 4k pages */
			if (has_edat && address && end - address >= PMD_SIZE &&
			    mode != POPULATE_ZERO_SHADOW) {
				void *page;

				if (mode == POPULATE_ONE2ONE) {
					page = (void *)address;
				} else {
					page = kasan_early_alloc_segment();
					memset(page, 0, _SEGMENT_SIZE);
				}
				pmd_val(*pm_dir) = __pa(page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}

			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_ONE2ONE:
				page = (void *)address;
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
				break;
			case POPULATE_SHALLOW:
				/* should never happen */
				break;
			}
		}
		address += PAGE_SIZE;
	}
}
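
/*
 * Make the given page table the active one: the ASCE derived from pgd is
 * written to the primary (CR1), secondary (CR7) and home (CR13) address
 * space control registers.
 */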
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
	unsigned long asce_bits;

	asce_bits = asce_type | _ASCE_TABLE_LENGTH;
	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
}
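
/* Turn on DAT and switch the current PSW to home address space mode. */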
static void __init kasan_enable_dat(void)
{
	psw_t psw;

	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
}
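
/*
 * Facility 8 is EDAT1 (segment-size mappings), facility 130 is the
 * instruction-execution-protection facility used for NX semantics.
 */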
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}
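
/*
 * Runs very early, before paging_init(): detect facilities, carve shadow
 * memory out of the top of detected RAM, build early_pg_dir with both the
 * identity mapping and the KASAN shadow, then switch to it and enable DAT
 * so that instrumented code can run from here on.
 */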
void __init kasan_early_init(void)
{
	unsigned long untracked_mem_end;
	unsigned long shadow_alloc_size;
	unsigned long initrd_end;
	unsigned long asce_type;
	unsigned long memsize;
	unsigned long vmax;
	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
	pte_t pte_z;
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

	kasan_early_detect_facilities();
	if (!has_nx)
		pgt_prot &= ~_PAGE_NOEXEC;
	pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

	memsize = get_mem_detect_end();
	if (!memsize)
		kasan_early_panic("cannot detect physical memory size\n");
	/* respect mem= cmdline parameter */
	if (memory_end_set && memsize > memory_end)
		memsize = memory_end;
	if (IS_ENABLED(CONFIG_CRASH_DUMP) && OLDMEM_BASE)
		memsize = min(memsize, OLDMEM_SIZE);
	memsize = min(memsize, KASAN_SHADOW_START);
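	/* the identity mapping must stay below KASAN_SHADOW_START, where the shadow begins */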

	if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
		/* 4 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION2_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION1_SIZE;
		asce_type = _ASCE_TYPE_REGION2;
	} else {
		/* 3 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION3_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION2_SIZE;
		asce_type = _ASCE_TYPE_REGION3;
	}

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_early_shadow_p4d,
			p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud,
			pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd,
			pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
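
	/* the shadow needs 1/8th of the tracked memory (KASAN_SHADOW_SCALE_SHIFT == 3) */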
	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
		initrd_end =
		    round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
		pgalloc_low = max(pgalloc_low, initrd_end);
	}

	if (pgalloc_low + shadow_alloc_size > memsize)
		kasan_early_panic("out of memory during initialisation\n");

	if (has_edat) {
		segment_pos = round_down(memsize, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		pgalloc_pos = segment_low;
	} else {
		pgalloc_pos = memsize;
	}
	init_mm.pgd = early_pg_dir;
	/*
	 * Current memory layout:
	 * +- 0 -------------+     +- shadow start -+
	 * | 1:1 ram mapping |    /| 1/8 ram        |
	 * |                 |   / |                |
	 * +- end of ram ----+  /  +----------------+
	 * | ... gap ...     | /   |                |
	 * +- shadow start --+/    |      zero      |
	 * | 1/8 addr space  |     |      page      |
	 * +- shadow end    -+     |     mapping    |
	 * | ... gap ...     |\    |   (untracked)  |
	 * +- vmalloc area  -+ \   |                |
	 * | vmalloc_size    |  \  |                |
	 * +- modules vaddr -+   \ +----------------+
	 * | 2Gb             |    \|    unmapped    | allocated per module
	 * +-----------------+     +- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+     +- shadow start -+
	 * | 1:1 ram mapping |    /| 1/8 ram        |
	 * |                 |   / |                |
	 * +- end of ram ----+  /  +----------------+
	 * | ... gap ...     | /   |      kasan     |
	 * +- shadow start --+/    |      zero      |
	 * | 1/8 addr space  |     |      page      |
	 * +- shadow end    -+     |     mapping    |
	 * | ... gap ...     |\    |   (untracked)  |
	 * +- vmalloc area  -+ \   +- vmalloc area -+
	 * | vmalloc_size    |  \  |shallow populate|
	 * +- modules vaddr -+   \ +- modules area -+
	 * | 2Gb             |    \|shallow populate|
	 * +-----------------+     +- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_MODULES))
		untracked_mem_end = vmax - MODULES_LEN;
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_mem_end = vmax - vmalloc_size - MODULES_LEN;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_early_vmemmap_populate(__sha(untracked_mem_end),
					     __sha(vmax), POPULATE_SHALLOW);
	}
	/* populate kasan shadow for untracked memory */
	kasan_early_vmemmap_populate(__sha(max_physmem_end),
				     __sha(untracked_mem_end),
				     POPULATE_ZERO_SHADOW);
	/* memory allocated for identity mapping structs will be freed later */
	pgalloc_freeable = pgalloc_pos;
	/* populate identity mapping */
	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
	kasan_set_pgd(early_pg_dir, asce_type);
	kasan_enable_dat();
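	/* shadow and identity mapping are in place: allow KASAN reports from now on */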
	init_task.kasan_depth = 0;
	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow(pgd_t *pg_dir)
{
	/*
	 * At this point we are still running on the early page tables set up
	 * in early_pg_dir, while swapper_pg_dir has just been initialized
	 * with the identity mapping. Carry the shadow memory region over
	 * from early_pg_dir to swapper_pg_dir.
	 */
	pgd_t *pg_dir_src;
	pgd_t *pg_dir_dst;
	p4d_t *p4_dir_src;
	p4d_t *p4_dir_dst;
	pud_t *pu_dir_src;
	pud_t *pu_dir_dst;

	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
	pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
	if (!p4d_folded(*p4_dir_src)) {
		/* 4 level paging */
		memcpy(p4_dir_dst, p4_dir_src,
		       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
		return;
	}
	/* 3 level paging */
	pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
	pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
	memcpy(pu_dir_dst, pu_dir_src,
	       (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
}
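
/*
 * Called once the final identity mapping is in place: the page tables that
 * were allocated here only for the early identity mapping are handed back
 * to memblock.
 */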
void __init kasan_free_early_identity(void)
{
	memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}