// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;
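
/*
 * Both early allocators hand out memory from the top of the detected
 * physical memory range and grow downwards: segment_pos/segment_low
 * bound _SEGMENT_SIZE-sized chunks used for large-page-backed shadow
 * memory, while pgalloc_pos/pgalloc_low bound page-sized chunks used
 * for page tables and 4k shadow pages.
 */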
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
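
/*
 * kasan_mem_to_shadow() maps every 8 bytes of address space to one
 * shadow byte: shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) +
 * KASAN_SHADOW_OFFSET. __sha() is shorthand to obtain that shadow
 * address as an unsigned long for the populate calls below.
 */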
static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait();
}
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)segment_pos;
}
static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)pgalloc_pos;
}
static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}
static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	/* a pte table is half a page, so carve two tables from each page */
	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}

	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}
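
/*
 * The walker below populates all page table levels for the range
 * [address, end). POPULATE_ONE2ONE maps addresses to themselves,
 * POPULATE_MAP backs the range with freshly allocated zeroed pages,
 * and POPULATE_ZERO_SHADOW wires suitably aligned ranges to the shared
 * kasan_early_shadow_* tables, so the read-only zero shadow costs
 * almost no memory regardless of the size of the untracked region.
 */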
enum populate_mode {
	POPULATE_ONE2ONE,
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW
};

static void __init kasan_early_vmemmap_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
	if (!has_nx)
		pgt_prot_zero &= ~_PAGE_NOEXEC;
	pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
	sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
						kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
						kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}
		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
						kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				pmd_populate(&init_mm, pm_dir,
						kasan_early_shadow_pte);
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			/* the first megabyte of 1:1 is mapped with 4k pages */
			if (has_edat && address && end - address >= PMD_SIZE &&
			    mode != POPULATE_ZERO_SHADOW) {
				void *page;

				if (mode == POPULATE_ONE2ONE) {
					page = (void *)address;
				} else {
					page = kasan_early_alloc_segment();
					memset(page, 0, _SEGMENT_SIZE);
				}
				pmd_val(*pm_dir) = __pa(page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}

			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_ONE2ONE:
				page = (void *)address;
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
				break;
			}
		}
		address += PAGE_SIZE;
	}
}
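
/*
 * Install the early page table as the active address space: the ASCE
 * (address-space-control element) built from pgd is loaded into control
 * registers 1 (primary), 7 (secondary) and 13 (home), so all three
 * address spaces resolve through the same tables.
 */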
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
	unsigned long asce_bits;

	asce_bits = asce_type | _ASCE_TABLE_LENGTH;
	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
}
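
/*
 * Turn on dynamic address translation by setting the DAT bit in the
 * current PSW mask and switching to the home address space, where the
 * ASCE installed above takes effect.
 */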
static void __init kasan_enable_dat(void)
{
	psw_t psw;

	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
}
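
/*
 * Facility 8 is EDAT-1 (large page support), facility 130 is
 * instruction-execution protection; the corresponding control
 * register 0 bits enable the features in hardware.
 */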
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}
static unsigned long __init get_mem_detect_end(void)
{
	unsigned long start;
	unsigned long end;

	if (mem_detect.count) {
		__get_mem_detect_block(mem_detect.count - 1, &start, &end);
		return end;
	}
	return 0;
}
void __init kasan_early_init(void)
{
	unsigned long untracked_mem_end;
	unsigned long shadow_alloc_size;
	unsigned long initrd_end;
	unsigned long asce_type;
	unsigned long memsize;
	unsigned long vmax;
	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
	pte_t pte_z;
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

	kasan_early_detect_facilities();
	if (!has_nx)
		pgt_prot &= ~_PAGE_NOEXEC;
	pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);
	memsize = get_mem_detect_end();
	if (!memsize)
		kasan_early_panic("cannot detect physical memory size\n");
	/* respect mem= cmdline parameter */
	if (memory_end_set && memsize > memory_end)
		memsize = memory_end;
	/* cap the 1:1 mapping so it can never overlap the shadow region */
	memsize = min(memsize, KASAN_SHADOW_START);
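
	/*
	 * With 4-level paging the kernel address space spans _REGION1_SIZE
	 * and the ASCE designates a region-second table; with 3 levels it
	 * spans _REGION2_SIZE with a region-third table. The shadow start
	 * and end must be aligned to the top-level entry size in each case.
	 */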
	if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
		/* 4 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION2_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION1_SIZE;
		asce_type = _ASCE_TYPE_REGION2;
	} else {
		/* 3 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION3_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION2_SIZE;
		asce_type = _ASCE_TYPE_REGION3;
	}
	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_early_shadow_p4d,
			p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud,
			pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd,
			pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
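
	/*
	 * KASAN needs one shadow byte per 8 bytes of tracked memory, so
	 * the shadow for memsize bytes takes memsize >> 3 bytes. That
	 * much room must fit between the end of the kernel (and initrd)
	 * and the end of usable memory.
	 */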
	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
		initrd_end =
		    round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
		pgalloc_low = max(pgalloc_low, initrd_end);
	}
	if (pgalloc_low + shadow_alloc_size > memsize)
		kasan_early_panic("out of memory during initialisation\n");

	if (has_edat) {
		segment_pos = round_down(memsize, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		pgalloc_pos = segment_low;
	} else {
		pgalloc_pos = memsize;
	}
	init_mm.pgd = early_pg_dir;
	/*
	 * Current memory layout:
	 * +- 0 -------------+   +- shadow start -+
	 * | 1:1 ram mapping |  /| 1/8 ram        |
	 * +- end of ram ----+ / +----------------+
	 * | ... gap ...     |/  | kasan          |
	 * +- shadow start --+   | zero           |
	 * | 1/8 addr space  |   | page           |
	 * +- shadow end ----+   | mapping        |
	 * | ... gap ...     |\  | (untracked)    |
	 * +- modules vaddr -+ \ +----------------+
	 * | 2Gb             |  \| unmapped       | allocated per module
	 * +-----------------+   +- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_MODULES))
		untracked_mem_end = vmax - MODULES_LEN;
	kasan_early_vmemmap_populate(__sha(max_physmem_end),
				     __sha(untracked_mem_end),
				     POPULATE_ZERO_SHADOW);
	/* memory allocated for identity mapping structs will be freed later */
	pgalloc_freeable = pgalloc_pos;
	/* populate identity mapping */
	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
	kasan_set_pgd(early_pg_dir, asce_type);
	kasan_enable_dat();
	/* enable kasan */
	init_task.kasan_depth = 0;
	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}
void __init kasan_copy_shadow(pgd_t *pg_dir)
{
	/*
	 * At this point we are still running on the early page tables set
	 * up in early_pg_dir, while swapper_pg_dir has just been initialized
	 * with the identity mapping. Carry the shadow memory region over
	 * from early_pg_dir to swapper_pg_dir.
	 */
	pgd_t *pg_dir_src;
	pgd_t *pg_dir_dst;
	p4d_t *p4_dir_src;
	p4d_t *p4_dir_dst;
	pud_t *pu_dir_src;
	pud_t *pu_dir_dst;

	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
	pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
	if (!p4d_folded(*p4_dir_src)) {
		/* 4 level paging */
		memcpy(p4_dir_dst, p4_dir_src,
		       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
		return;
	}
	/* 3 level paging */
	pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
	pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
	memcpy(pu_dir_dst, pu_dir_src,
	       (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
}
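
/*
 * Once the final identity mapping in swapper_pg_dir is in place, the
 * page tables that backed the early 1:1 mapping (everything between
 * pgalloc_pos and pgalloc_freeable) are no longer needed and can be
 * handed back to memblock.
 */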
void __init kasan_free_early_identity(void)
{
	memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}