[linux/fpc-iii.git] / arch/s390/mm/kasan_init.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>

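/*
 * Early allocation state. Memory for the shadow is carved from the top of
 * detected physical memory downwards: segment_pos/segment_low bound a
 * region handed out in 1 MB segments (used when EDAT large pages are
 * available), while pgalloc_pos/pgalloc_low bound a region handed out in
 * 4 KB pages. Both allocators just move their cursor down and panic once
 * it crosses the low watermark; nothing is freed during early init.
 */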
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;

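/*
 * Generic KASAN maps each 8 bytes of memory to 1 shadow byte:
 * kasan_mem_to_shadow(addr) is
 * (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET.
 * __sha() is shorthand for that translation with the result cast to
 * unsigned long.
 */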
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

static void __init kasan_early_panic(const char *reason)
{
        sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
        sclp_early_printk(reason);
        disabled_wait();
}

static void * __init kasan_early_alloc_segment(void)
{
        segment_pos -= _SEGMENT_SIZE;

        if (segment_pos < segment_low)
                kasan_early_panic("out of memory during initialisation\n");

        return (void *)segment_pos;
}

static void * __init kasan_early_alloc_pages(unsigned int order)
{
        pgalloc_pos -= (PAGE_SIZE << order);

        if (pgalloc_pos < pgalloc_low)
                kasan_early_panic("out of memory during initialisation\n");

        return (void *)pgalloc_pos;
}

static void * __init kasan_early_crst_alloc(unsigned long val)
{
        unsigned long *table;

        table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
        if (table)
                crst_table_init(table, val);
        return table;
}

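/*
 * On s390 a page table is half a page (_PAGE_TABLE_SIZE * 2 == PAGE_SIZE,
 * see the BUILD_BUG_ON below), so one allocated page yields two page
 * tables: the upper half is handed out first, the lower half is kept in
 * pte_leftover for the next call.
 */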
static pte_t * __init kasan_early_pte_alloc(void)
{
        static void *pte_leftover;
        pte_t *pte;

        BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

        if (!pte_leftover) {
                pte_leftover = kasan_early_alloc_pages(0);
                pte = pte_leftover + _PAGE_TABLE_SIZE;
        } else {
                pte = pte_leftover;
                pte_leftover = NULL;
        }
        memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
        return pte;
}

enum populate_mode {
        POPULATE_ONE2ONE,
        POPULATE_MAP,
        POPULATE_ZERO_SHADOW
};

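/*
 * Walk [address, end) through the early page tables, allocating missing
 * levels on the fly:
 *
 * POPULATE_ONE2ONE     - map each virtual address to the same physical
 *                        address (the kernel identity mapping);
 * POPULATE_MAP         - back the range with freshly allocated, zeroed
 *                        pages (writable shadow for tracked memory);
 * POPULATE_ZERO_SHADOW - wire the range read-only to the shared
 *                        kasan_early_shadow_* tables and page, so large
 *                        untracked ranges cost almost no memory.
 *
 * With EDAT available, suitably aligned 1 MB chunks are mapped with
 * segment (pmd) entries instead of individual ptes.
 */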
static void __init kasan_early_vmemmap_populate(unsigned long address,
                                                unsigned long end,
                                                enum populate_mode mode)
{
        unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
        pgd_t *pg_dir;
        p4d_t *p4_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;

        pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
        if (!has_nx)
                pgt_prot_zero &= ~_PAGE_NOEXEC;
        pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
        sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);

        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, PGDIR_SIZE) &&
                            end - address >= PGDIR_SIZE) {
                                pgd_populate(&init_mm, pg_dir,
                                             kasan_early_shadow_p4d);
                                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                                continue;
                        }
                        p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
                        pgd_populate(&init_mm, pg_dir, p4_dir);
                }

                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, P4D_SIZE) &&
                            end - address >= P4D_SIZE) {
                                p4d_populate(&init_mm, p4_dir,
                                             kasan_early_shadow_pud);
                                address = (address + P4D_SIZE) & P4D_MASK;
                                continue;
                        }
                        pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
                        p4d_populate(&init_mm, p4_dir, pu_dir);
                }

                pu_dir = pud_offset(p4_dir, address);
                if (pud_none(*pu_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, PUD_SIZE) &&
                            end - address >= PUD_SIZE) {
                                pud_populate(&init_mm, pu_dir,
                                             kasan_early_shadow_pmd);
                                address = (address + PUD_SIZE) & PUD_MASK;
                                continue;
                        }
                        pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }

                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, PMD_SIZE) &&
                            end - address >= PMD_SIZE) {
                                pmd_populate(&init_mm, pm_dir,
                                             kasan_early_shadow_pte);
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }
                        /* the first megabyte of 1:1 is mapped with 4k pages */
                        if (has_edat && address && end - address >= PMD_SIZE &&
                            mode != POPULATE_ZERO_SHADOW) {
                                void *page;

                                if (mode == POPULATE_ONE2ONE) {
                                        page = (void *)address;
                                } else {
                                        page = kasan_early_alloc_segment();
                                        memset(page, 0, _SEGMENT_SIZE);
                                }
                                pmd_val(*pm_dir) = __pa(page) | sgt_prot;
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }

                        pt_dir = kasan_early_pte_alloc();
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                } else if (pmd_large(*pm_dir)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                if (pte_none(*pt_dir)) {
                        void *page;

                        switch (mode) {
                        case POPULATE_ONE2ONE:
                                page = (void *)address;
                                pte_val(*pt_dir) = __pa(page) | pgt_prot;
                                break;
                        case POPULATE_MAP:
                                page = kasan_early_alloc_pages(0);
                                memset(page, 0, PAGE_SIZE);
                                pte_val(*pt_dir) = __pa(page) | pgt_prot;
                                break;
                        case POPULATE_ZERO_SHADOW:
                                page = kasan_early_shadow_page;
                                pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
                                break;
                        }
                }
                address += PAGE_SIZE;
        }
}

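/*
 * Install pgd as the address space control element (ASCE) for the
 * primary (CR1), secondary (CR7) and home (CR13) address spaces.
 */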
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
        unsigned long asce_bits;

        asce_bits = asce_type | _ASCE_TABLE_LENGTH;
        S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
        S390_lowcore.user_asce = S390_lowcore.kernel_asce;

        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
}

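/*
 * Turn on dynamic address translation by setting the DAT bit in the
 * current PSW mask and switching to the home address space, where the
 * kernel mapping and its shadow live.
 */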
static void __init kasan_enable_dat(void)
{
        psw_t psw;

        psw.mask = __extract_psw();
        psw_bits(psw).dat = 1;
        psw_bits(psw).as = PSW_BITS_AS_HOME;
        __load_psw_mask(psw.mask);
}

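/*
 * Probe the facilities this code can exploit: facility 8 is EDAT-1
 * (1 MB segment mappings), facility 130 is instruction-execution
 * protection, which backs the _PAGE_NOEXEC semantics. The matching
 * control register bits are set to enable each feature.
 */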
static void __init kasan_early_detect_facilities(void)
{
        if (test_facility(8)) {
                has_edat = true;
                __ctl_set_bit(0, 23);
        }
        if (!noexec_disabled && test_facility(130)) {
                has_nx = true;
                __ctl_set_bit(0, 20);
        }
}

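/*
 * The end of the last block reported by memory detection is taken as
 * the amount of usable physical memory; 0 means detection failed.
 */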
static unsigned long __init get_mem_detect_end(void)
{
        unsigned long start;
        unsigned long end;

        if (mem_detect.count) {
                __get_mem_detect_block(mem_detect.count - 1, &start, &end);
                return end;
        }
        return 0;
}

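/*
 * Build a complete set of early page tables: an identity mapping of all
 * detected memory, writable shadow for that mapping, and zero shadow for
 * everything else up to the module area; then switch to those tables,
 * enable DAT and arm KASAN by resetting init_task.kasan_depth.
 */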
void __init kasan_early_init(void)
{
        unsigned long untracked_mem_end;
        unsigned long shadow_alloc_size;
        unsigned long initrd_end;
        unsigned long asce_type;
        unsigned long memsize;
        unsigned long vmax;
        unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
        pte_t pte_z;
        pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
        pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
        p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

        kasan_early_detect_facilities();
        if (!has_nx)
                pgt_prot &= ~_PAGE_NOEXEC;
        pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

        memsize = get_mem_detect_end();
        if (!memsize)
                kasan_early_panic("cannot detect physical memory size\n");
        /* respect mem= cmdline parameter */
        if (memory_end_set && memsize > memory_end)
                memsize = memory_end;
        memsize = min(memsize, KASAN_SHADOW_START);

        if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
                /* 4 level paging */
                BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
                BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
                crst_table_init((unsigned long *)early_pg_dir,
                                _REGION2_ENTRY_EMPTY);
                untracked_mem_end = vmax = _REGION1_SIZE;
                asce_type = _ASCE_TYPE_REGION2;
        } else {
                /* 3 level paging */
                BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
                BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
                crst_table_init((unsigned long *)early_pg_dir,
                                _REGION3_ENTRY_EMPTY);
                untracked_mem_end = vmax = _REGION2_SIZE;
                asce_type = _ASCE_TYPE_REGION3;
        }

        /* init kasan zero shadow */
        crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
        crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
        crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
        memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

        shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
        pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
                initrd_end =
                        round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
                pgalloc_low = max(pgalloc_low, initrd_end);
        }

        if (pgalloc_low + shadow_alloc_size > memsize)
                kasan_early_panic("out of memory during initialisation\n");

        if (has_edat) {
                segment_pos = round_down(memsize, _SEGMENT_SIZE);
                segment_low = segment_pos - shadow_alloc_size;
                pgalloc_pos = segment_low;
        } else {
                pgalloc_pos = memsize;
        }
        init_mm.pgd = early_pg_dir;
        /*
         * Current memory layout:
         * +- 0 -------------+     +- shadow start -+
         * | 1:1 ram mapping |    /| 1/8 ram        |
         * +- end of ram ----+   / +----------------+
         * | ... gap ...     |  /  |     kasan      |
         * +- shadow start --+     |     zero       |
         * | 1/8 addr space  |     |     page       |
         * +- shadow end ----+     |    mapping     |
         * | ... gap ...     |\    |  (untracked)   |
         * +- modules vaddr -+ \   +----------------+
         * | 2Gb             |  \  |    unmapped    | allocated per module
         * +-----------------+     +- shadow end ---+
         */
        /* populate kasan shadow (for identity mapping and zero page mapping) */
        kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
        if (IS_ENABLED(CONFIG_MODULES))
                untracked_mem_end = vmax - MODULES_LEN;
        kasan_early_vmemmap_populate(__sha(max_physmem_end),
                                     __sha(untracked_mem_end),
                                     POPULATE_ZERO_SHADOW);
        /* memory allocated for identity mapping structs will be freed later */
        pgalloc_freeable = pgalloc_pos;
        /* populate identity mapping */
        kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
        kasan_set_pgd(early_pg_dir, asce_type);
        kasan_enable_dat();
        /* enable kasan */
        init_task.kasan_depth = 0;
        memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
        sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow(pgd_t *pg_dir)
{
        /*
         * At this point we are still running on the early page tables set up
         * in early_pg_dir, while swapper_pg_dir has just been initialized
         * with the identity mapping. Carry the shadow memory region over
         * from early_pg_dir to swapper_pg_dir.
         */

        pgd_t *pg_dir_src;
        pgd_t *pg_dir_dst;
        p4d_t *p4_dir_src;
        p4d_t *p4_dir_dst;
        pud_t *pu_dir_src;
        pud_t *pu_dir_dst;

        pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
        pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
        p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
        p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
        if (!p4d_folded(*p4_dir_src)) {
                /* 4 level paging */
                memcpy(p4_dir_dst, p4_dir_src,
                       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
                return;
        }
        /* 3 level paging */
        pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
        pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
        memcpy(pu_dir_dst, pu_dir_src,
               (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
}

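/*
 * The page tables backing the early identity mapping were all allocated
 * after the pgalloc_freeable watermark was recorded, i.e. (since the
 * allocator grows downwards) they occupy [pgalloc_pos, pgalloc_freeable).
 * Once swapper_pg_dir has taken over, that range goes back to memblock.
 */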
void __init kasan_free_early_identity(void)
{
        memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}