// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * Kasan shadow region must lie at a fixed address across sv39, sv48 and sv57
 * which is right before the kernel.
 *
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the region start is aligned on PGDIR_SIZE whereas the end
 * region is not and then we have to go down to the PUD level.
 */

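/*
 * Temporary top-level tables used by create_tmp_mapping(): kasan_init()
 * switches satp to tmp_pg_dir so the early shadow entries installed in
 * swapper_pg_dir can be torn down without modifying the live page table.
 */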
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
static pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;

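/*
 * kasan_populate_{pte,pmd,pud,p4d,pgd} build the final shadow mapping:
 * missing page tables are allocated from memblock and, where the virtual
 * range is large and aligned enough, the shadow is mapped with a single
 * block at that level, otherwise we recurse down to the next level.
 * Every newly mapped shadow page is poisoned with KASAN_SHADOW_INIT.
 */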
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *p;

	if (pmd_none(pmdp_get(pmd))) {
		p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	ptep = pte_offset_kernel(pmd, vaddr);

	do {
		if (pte_none(ptep_get(ptep))) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
			memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
}

static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *p;
	unsigned long next;

	if (pud_none(pudp_get(pud))) {
		p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
		set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	pmdp = pmd_offset(pud, vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
		    (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PMD_SIZE);
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);
}

static void __init kasan_populate_pud(p4d_t *p4d,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *p;
	unsigned long next;

	if (p4d_none(p4dp_get(p4d))) {
		p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
		set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	pudp = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
		    (next - vaddr) >= PUD_SIZE) {
			phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
			if (phys_addr) {
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE);
				continue;
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_populate_p4d(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	p4d_t *p4dp, *p;
	unsigned long next;

	if (pgd_none(pgdp_get(pgd))) {
		p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	p4dp = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
			if (phys_addr) {
				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, P4D_SIZE);
				continue;
			}
		}

		kasan_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PGDIR_SIZE);
				continue;
			}
		}

		kasan_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

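/*
 * kasan_early_clear_{pud,p4d,pgd} remove the early shadow entries (which
 * all point at the shared kasan_early_shadow_* tables) so that the real
 * shadow mapping can be installed in their place.
 */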
static void __init kasan_early_clear_pud(p4d_t *p4dp,
					 unsigned long vaddr, unsigned long end)
{
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (!pgtable_l4_enabled) {
		pudp = (pud_t *)p4dp;
	} else {
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
		pudp = base_pud + pud_index(vaddr);
	}

	do {
		next = pud_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			pud_clear(pudp);
			continue;
		}

		BUG();
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_clear_p4d(pgd_t *pgdp,
					 unsigned long vaddr, unsigned long end)
{
	p4d_t *p4dp, *base_p4d;
	unsigned long next;

	if (!pgtable_l5_enabled) {
		p4dp = (p4d_t *)pgdp;
	} else {
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
		p4dp = base_p4d + p4d_index(vaddr);
	}

	do {
		next = p4d_addr_end(vaddr, end);

		if (pgtable_l4_enabled && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			p4d_clear(p4dp);
			continue;
		}

		kasan_early_clear_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_clear_pgd(pgd_t *pgdp,
					 unsigned long vaddr, unsigned long end)
{
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgtable_l5_enabled && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			pgd_clear(pgdp);
			continue;
		}

		kasan_early_clear_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

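/*
 * kasan_early_populate_{pud,p4d,pgd} install the shared early shadow
 * tables, which alias the entire shadow region onto
 * kasan_early_shadow_page, so instrumented code can run before real
 * shadow memory has been allocated.
 */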
static void __init kasan_early_populate_pud(p4d_t *p4dp,
					    unsigned long vaddr,
					    unsigned long end)
{
	pud_t *pudp, *base_pud;
	phys_addr_t phys_addr;
	unsigned long next;

	if (!pgtable_l4_enabled) {
		pudp = (pud_t *)p4dp;
	} else {
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
		pudp = base_pud + pud_index(vaddr);
	}

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
		    (next - vaddr) >= PUD_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
			set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		BUG();
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_populate_p4d(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	p4d_t *p4dp, *base_p4d;
	phys_addr_t phys_addr;
	unsigned long next;

	/*
	 * We can't use pgd_page_vaddr here as it would return a linear
	 * mapping address but it is not mapped yet, but when populating
	 * early_pg_dir, we need the physical address and when populating
	 * swapper_pg_dir, we need the kernel virtual address so use
	 * pt_ops facility.
	 * Note that this test is then completely equivalent to
	 * p4dp = p4d_offset(pgdp, vaddr)
	 */
	if (!pgtable_l5_enabled) {
		p4dp = (p4d_t *)pgdp;
	} else {
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
		p4dp = base_p4d + p4d_index(vaddr);
	}

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
			set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_populate_pgd(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
			set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

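/*
 * Set up the early shadow: every entry of the shared early shadow tables
 * is pointed at the level below, bottoming out at kasan_early_shadow_page,
 * then the tables are hooked into early_pg_dir for the whole shadow region.
 */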
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	if (pgtable_l5_enabled) {
		for (i = 0; i < PTRS_PER_P4D; ++i)
			set_p4d(kasan_early_shadow_p4d + i,
				pfn_p4d(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pud))),
					PAGE_TABLE));
	}

	kasan_early_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}

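/*
 * Install the same early shadow into the kernel page table returned by
 * pgd_offset_k() (swapper_pg_dir).
 */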
void __init kasan_swapper_init(void)
{
	kasan_early_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}

static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend);
}

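/*
 * kasan_shallow_populate_{pud,p4d,pgd} only allocate intermediate page
 * tables for the shadow of the vmalloc and modules regions; with
 * CONFIG_KASAN_VMALLOC the leaf shadow pages there are expected to be
 * populated later on demand.
 */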
static void __init kasan_shallow_populate_pud(p4d_t *p4d,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pud_t *pud_k = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(pudp_get(pud_k))) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		BUG();
	} while (pud_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	p4d_t *p4d_k = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(p4dp_get(p4d_k))) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		kasan_shallow_populate_pud(p4d_k, vaddr, end);
	} while (p4d_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(pgdp_get(pgd_k))) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		kasan_shallow_populate_p4d(pgd_k, vaddr, next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
}

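/*
 * Map real shadow for an early vm area by converting [start, start + size)
 * with kasan_mem_to_shadow().
 */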
#ifdef CONFIG_KASAN_VMALLOC
void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
{
	kasan_populate(kasan_mem_to_shadow(start),
		       kasan_mem_to_shadow(start + size));
}
#endif

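/*
 * Build tmp_pg_dir as a copy of swapper_pg_dir, duplicating the shared
 * top-level entries that cover KASAN_SHADOW_END so they can be modified
 * without touching the live kernel tables.
 */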
static void __init create_tmp_mapping(void)
{
	void *ptr;
	p4d_t *base_p4d;

	/*
	 * We need to clean the early mapping: this is hard to achieve "in-place",
	 * so install a temporary mapping like arm64 and x86 do.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(pgd_t) * PTRS_PER_PGD);

	/* Copy the last p4d since it is shared with the kernel mapping. */
	if (pgtable_l5_enabled) {
		ptr = (p4d_t *)pgd_page_vaddr(pgdp_get(pgd_offset_k(KASAN_SHADOW_END)));
		memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
		set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
			pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
		base_p4d = tmp_p4d;
	} else {
		base_p4d = (p4d_t *)tmp_pg_dir;
	}

	/* Copy the last pud since it is shared with the kernel mapping. */
	if (pgtable_l4_enabled) {
		ptr = (pud_t *)p4d_page_vaddr(p4dp_get(base_p4d + p4d_index(KASAN_SHADOW_END)));
		memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
		set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
			pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
	}
}

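/*
 * Switch to the temporary mapping, clear the early shadow, then populate
 * the real shadow: the early shadow page covers the fixmap/vmalloc gap,
 * vmalloc and modules are shallow populated (or covered by the early
 * shadow without CONFIG_KASAN_VMALLOC), and the linear mapping and kernel
 * get real shadow pages. Finally re-poison kasan_early_shadow_page,
 * re-enable KASAN checks and switch back to swapper_pg_dir.
 */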
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	create_tmp_mapping();
	csr_write(CSR_SATP, PFN_DOWN(__pa(tmp_pg_dir)) | satp_mode);

	kasan_early_clear_pgd(pgd_offset_k(KASAN_SHADOW_START),
			      KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)FIXADDR_START),
				    (void *)kasan_mem_to_shadow((void *)VMALLOC_START));

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
		/* Shallow populate modules and BPF which are vmalloc-allocated */
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)MODULES_VADDR),
			(void *)kasan_mem_to_shadow((void *)MODULES_END));
	} else {
		kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)VMALLOC_START),
					    (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
	}

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_END),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;

	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | satp_mode);
	local_flush_tlb_all();
}