// SPDX-License-Identifier: GPL-2.0
/*
 * Xtensa KASAN shadow map initialization
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */
11 #include <linux/memblock.h>
12 #include <linux/init_task.h>
13 #include <linux/kasan.h>
14 #include <linux/kernel.h>
15 #include <asm/initialize_mmu.h>
16 #include <asm/tlbflush.h>
17 #include <asm/traps.h>
19 void __init
kasan_early_init(void)
21 unsigned long vaddr
= KASAN_SHADOW_START
;
22 pgd_t
*pgd
= pgd_offset_k(vaddr
);
23 p4d_t
*p4d
= p4d_offset(pgd
, vaddr
);
24 pud_t
*pud
= pud_offset(p4d
, vaddr
);
25 pmd_t
*pmd
= pmd_offset(pud
, vaddr
);
28 for (i
= 0; i
< PTRS_PER_PTE
; ++i
)
29 set_pte(kasan_early_shadow_pte
+ i
,
30 mk_pte(virt_to_page(kasan_early_shadow_page
),
33 for (vaddr
= 0; vaddr
< KASAN_SHADOW_SIZE
; vaddr
+= PMD_SIZE
, ++pmd
) {
34 BUG_ON(!pmd_none(*pmd
));
35 set_pmd(pmd
, __pmd((unsigned long)kasan_early_shadow_pte
));
40 static void __init
populate(void *start
, void *end
)
42 unsigned long n_pages
= (end
- start
) / PAGE_SIZE
;
43 unsigned long n_pmds
= n_pages
/ PTRS_PER_PTE
;
45 unsigned long vaddr
= (unsigned long)start
;
46 pgd_t
*pgd
= pgd_offset_k(vaddr
);
47 p4d_t
*p4d
= p4d_offset(pgd
, vaddr
);
48 pud_t
*pud
= pud_offset(p4d
, vaddr
);
49 pmd_t
*pmd
= pmd_offset(pud
, vaddr
);
50 pte_t
*pte
= memblock_alloc(n_pages
* sizeof(pte_t
), PAGE_SIZE
);
53 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
54 __func__
, n_pages
* sizeof(pte_t
), PAGE_SIZE
);
56 pr_debug("%s: %p - %p\n", __func__
, start
, end
);
58 for (i
= j
= 0; i
< n_pmds
; ++i
) {
61 for (k
= 0; k
< PTRS_PER_PTE
; ++k
, ++j
) {
63 memblock_phys_alloc_range(PAGE_SIZE
, PAGE_SIZE
,
65 MEMBLOCK_ALLOC_ANYWHERE
);
68 panic("Failed to allocate page table page\n");
70 set_pte(pte
+ j
, pfn_pte(PHYS_PFN(phys
), PAGE_KERNEL
));
74 for (i
= 0; i
< n_pmds
; ++i
, pte
+= PTRS_PER_PTE
)
75 set_pmd(pmd
+ i
, __pmd((unsigned long)pte
));
77 local_flush_tlb_all();
78 memset(start
, 0, end
- start
);
81 void __init
kasan_init(void)
85 BUILD_BUG_ON(KASAN_SHADOW_OFFSET
!= KASAN_SHADOW_START
-
86 (KASAN_START_VADDR
>> KASAN_SHADOW_SCALE_SHIFT
));
87 BUILD_BUG_ON(VMALLOC_START
< KASAN_START_VADDR
);
90 * Replace shadow map pages that cover addresses from VMALLOC area
91 * start to the end of KSEG with clean writable pages.
93 populate(kasan_mem_to_shadow((void *)VMALLOC_START
),
94 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR
));
97 * Write protect kasan_early_shadow_page and zero-initialize it again.
99 for (i
= 0; i
< PTRS_PER_PTE
; ++i
)
100 set_pte(kasan_early_shadow_pte
+ i
,
101 mk_pte(virt_to_page(kasan_early_shadow_page
),
104 local_flush_tlb_all();
105 memset(kasan_early_shadow_page
, 0, PAGE_SIZE
);
107 /* At this point kasan is fully initialized. Enable error messages. */
108 current
->kasan_depth
= 0;
109 pr_info("KernelAddressSanitizer initialized\n");