// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>

static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;
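
/*
 * Translate an address into its shadow address: each 8 bytes of memory
 * are tracked by one shadow byte, so the shadow of an address is
 * (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET.
 */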
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	/* halt in a disabled wait state; boot cannot continue */
	disabled_wait(0);
}
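
/*
 * Both early allocators below hand out memory top-down from the end of
 * physical memory: 1 MB segments from segment_pos, page-sized chunks
 * from pgalloc_pos. Crossing the corresponding low watermark would
 * overlap already reserved areas, so boot is aborted instead.
 */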
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)segment_pos;
}

static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)pgalloc_pos;
}

static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}
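
/*
 * s390 page tables are _PAGE_TABLE_SIZE (2 KB), half of a 4 KB page,
 * which the BUILD_BUG_ON below asserts. Every other call is therefore
 * satisfied from the unused half of the previously allocated page
 * instead of allocating a fresh one.
 */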
static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}

	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}
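
/*
 * Three ways to populate a range:
 * POPULATE_ONE2ONE     - identity-map it (virtual address == physical),
 * POPULATE_MAP         - back it with freshly allocated zeroed pages,
 * POPULATE_ZERO_SHADOW - map it, read-only, to the single shared kasan
 *                        zero page.
 */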
enum populate_mode {
	POPULATE_ONE2ONE,
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW
};

static void __init kasan_early_vmemmap_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
	if (!has_nx)
		pgt_prot_zero &= ~_PAGE_NOEXEC;
	pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
	sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);
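
	/*
	 * Walk the range one page at a time. Whenever a whole, suitably
	 * aligned PGDIR/P4D/PUD/PMD-sized chunk is populated with zero
	 * shadow, the entry is pointed at the shared kasan zero tables
	 * and all lower levels are skipped.
	 */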
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir, kasan_zero_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir, kasan_zero_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir, kasan_zero_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				pmd_populate(&init_mm, pm_dir, kasan_zero_pte);
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			/* the first megabyte of 1:1 is mapped with 4k pages */
			if (has_edat && address && end - address >= PMD_SIZE &&
			    mode != POPULATE_ZERO_SHADOW) {
				void *page;

				if (mode == POPULATE_ONE2ONE) {
					page = (void *)address;
				} else {
					page = kasan_early_alloc_segment();
					memset(page, 0, _SEGMENT_SIZE);
				}
				pmd_val(*pm_dir) = __pa(page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}

			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_ONE2ONE:
				page = (void *)address;
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_zero_page;
				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
				break;
			}
		}
		address += PAGE_SIZE;
	}
}
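
/*
 * Install the given top-level table as the address space control element
 * (ASCE): control registers 1, 7 and 13 hold the primary, secondary and
 * home space ASCE respectively.
 */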
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
	unsigned long asce_bits;

	asce_bits = asce_type | _ASCE_TABLE_LENGTH;
	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
}
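
/* Build a PSW mask with DAT enabled and the home address space selected. */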
static void __init kasan_enable_dat(void)
{
	psw_t psw;

	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
}

static void __init kasan_early_detect_facilities(void)
{
	__stfle(S390_lowcore.stfle_fac_list,
		ARRAY_SIZE(S390_lowcore.stfle_fac_list));
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}
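
/* Return the end address of the last (highest) detected memory block. */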
static unsigned long __init get_mem_detect_end(void)
{
	unsigned long start;
	unsigned long end;

	if (mem_detect.count) {
		__get_mem_detect_block(mem_detect.count - 1, &start, &end);
		return end;
	}
	return 0;
}
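
/*
 * Boot-time KASAN setup: detect facilities and memory size, build an
 * early page table hierarchy with both the identity mapping and the
 * shadow mapping, switch to it, enable DAT and finally unblock KASAN
 * by resetting kasan_depth.
 */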
void __init kasan_early_init(void)
{
	unsigned long untracked_mem_end;
	unsigned long shadow_alloc_size;
	unsigned long initrd_end;
	unsigned long asce_type;
	unsigned long memsize;
	unsigned long vmax;
	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
	pte_t pte_z;
	pmd_t pmd_z = __pmd(__pa(kasan_zero_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_zero_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_zero_pud) | _REGION2_ENTRY);

	kasan_early_detect_facilities();
	if (!has_nx)
		pgt_prot &= ~_PAGE_NOEXEC;
	pte_z = __pte(__pa(kasan_zero_page) | pgt_prot);

	memsize = get_mem_detect_end();
	if (!memsize)
		kasan_early_panic("cannot detect physical memory size\n");
	/* respect mem= cmdline parameter */
	if (memory_end_set && memsize > memory_end)
		memsize = memory_end;
	memsize = min(memsize, KASAN_SHADOW_START);

	if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
		/* 4 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION2_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION1_SIZE;
		asce_type = _ASCE_TYPE_REGION2;
	} else {
		/* 3 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION3_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION2_SIZE;
		asce_type = _ASCE_TYPE_REGION3;
	}

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_zero_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_zero_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_zero_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_zero_pte, pte_val(pte_z), PTRS_PER_PTE);

	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
		initrd_end =
		    round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
		pgalloc_low = max(pgalloc_low, initrd_end);
	}

	if (pgalloc_low + shadow_alloc_size > memsize)
		kasan_early_panic("out of memory during initialisation\n");

	if (has_edat) {
		segment_pos = round_down(memsize, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		pgalloc_pos = segment_low;
	} else {
		pgalloc_pos = memsize;
	}
	init_mm.pgd = early_pg_dir;
	/*
	 * Current memory layout:
	 * +- 0 -------------+   +- shadow start -+
	 * | 1:1 ram mapping |  /| 1/8 ram        |
	 * +- end of ram ----+ / +----------------+
	 * | ... gap ...     |/  |     kasan      |
	 * +- shadow start --+   |     zero       |
	 * | 1/8 addr space  |   |     page       |
	 * +- shadow end ----+   |    mapping     |
	 * | ... gap ...     |\  |  (untracked)   |
	 * +- modules vaddr -+ \ +----------------+
	 * | 2Gb             |  \|    unmapped    | allocated per module
	 * +-----------------+   +- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_MODULES))
		untracked_mem_end = vmax - MODULES_LEN;
	kasan_early_vmemmap_populate(__sha(max_physmem_end),
				     __sha(untracked_mem_end),
				     POPULATE_ZERO_SHADOW);
	/* memory allocated for identity mapping structs will be freed later */
	pgalloc_freeable = pgalloc_pos;
	/* populate identity mapping */
	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
	kasan_set_pgd(early_pg_dir, asce_type);
	kasan_enable_dat();
	/* enable kasan */
	init_task.kasan_depth = 0;
	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow(pgd_t *pg_dir)
{
	/*
	 * At this point we are still running on the early page tables in
	 * early_pg_dir, while swapper_pg_dir has just been initialized with
	 * the identity mapping. Carry over the shadow memory region from
	 * early_pg_dir to swapper_pg_dir.
	 */
	pgd_t *pg_dir_src;
	pgd_t *pg_dir_dst;
	p4d_t *p4_dir_src;
	p4d_t *p4_dir_dst;
	pud_t *pu_dir_src;
	pud_t *pu_dir_dst;

	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
	pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
	if (!p4d_folded(*p4_dir_src)) {
		/* 4 level paging */
		memcpy(p4_dir_dst, p4_dir_src,
		       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
		return;
	}
	/* 3 level paging */
	pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
	pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
	memcpy(pu_dir_dst, pu_dir_src,
	       (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
}
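
/*
 * The page tables backing the early identity mapping (everything between
 * pgalloc_pos and pgalloc_freeable) are no longer needed once the final
 * kernel page tables are in place, so hand them back to memblock.
 */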
void __init kasan_free_early_identity(void)
{
	memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}