#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;

/*
 * This page is used as the early shadow. We don't use empty_zero_page
 * at early stages: stack instrumentation could write some garbage to
 * this page.
 * Later we reuse it as the zero shadow for large ranges of memory that
 * are allowed to be accessed but are not instrumented by kasan
 * (vmalloc/vmemmap ...).
 */
static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
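
/*
 * Map real shadow memory for one range of mapped physical pages; the
 * shadow backing the range is allocated via vmemmap_populate().
 */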
static int __init map_range(struct range *range)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
        end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

        /*
         * end + 1 here is intentional. We check several shadow bytes in
         * advance to slightly speed up the fastpath. In some rare cases
         * we could cross the boundary of the mapped shadow, so we just
         * map some more here.
         */
        return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}
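
/*
 * Remove the pgd entries covering [start, end) so the early zero shadow
 * installed by kasan_map_early_shadow() can be replaced.
 */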
static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        for (; start < end; start += PGDIR_SIZE)
                pgd_clear(pgd_offset_k(start));
}
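
/*
 * Point the whole shadow region at the zero page tables so that
 * instrumented code can run before the real shadow is allocated.
 */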
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
        int i;
        unsigned long start = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;

        for (i = pgd_index(start); start < end; i++) {
                /* Every shadow pgd entry points at the shared zero pud table. */
                pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE);
                start += PGDIR_SIZE;
        }
}
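
/*
 * Map [addr, end) of shadow to kasan_zero_page, one pte at a time.
 */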
static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
                                unsigned long end)
{
        pte_t *pte = pte_offset_kernel(pmd, addr);

        while (addr + PAGE_SIZE <= end) {
                WARN_ON(!pte_none(*pte));
                /* Same protection bits as pte_val in kasan_early_init(). */
                set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
                                        | __PAGE_KERNEL));
                addr += PAGE_SIZE;
                pte = pte_offset_kernel(pmd, addr);
        }
        return 0;
}
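
/*
 * Map [addr, end) of shadow at pmd level: pmd-aligned chunks share
 * kasan_zero_pte, the remainder falls through to zero_pte_populate().
 */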
static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
                                unsigned long end)
{
        int ret = 0;
        pmd_t *pmd = pmd_offset(pud, addr);

        while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
                WARN_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
                                        | _KERNPG_TABLE));
                addr += PMD_SIZE;
                pmd = pmd_offset(pud, addr);
        }

        /* The unaligned tail needs a real pte table populated entry by entry. */
        if (addr < end) {
                if (pmd_none(*pmd)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pte_populate(pmd, addr, end);
        }
        return ret;
}
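
/*
 * Map [addr, end) of shadow at pud level: pud-aligned chunks share
 * kasan_zero_pmd, the remainder falls through to zero_pmd_populate().
 */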
static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
                                unsigned long end)
{
        int ret = 0;
        pud_t *pud = pud_offset(pgd, addr);

        while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
                WARN_ON(!pud_none(*pud));
                set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
                                        | _KERNPG_TABLE));
                addr += PUD_SIZE;
                pud = pud_offset(pgd, addr);
        }

        /* Descend into a real pmd table for the unaligned tail. */
        if (addr < end) {
                if (pud_none(*pud)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pmd_populate(pud, addr, end);
        }
        return ret;
}
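
/*
 * Map [addr, end) of shadow at pgd level: pgd-aligned chunks share
 * kasan_zero_pud, the remainder falls through to zero_pud_populate().
 */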
static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
{
        int ret = 0;
        pgd_t *pgd = pgd_offset_k(addr);

        while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
                WARN_ON(!pgd_none(*pgd));
                set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
                                        | _KERNPG_TABLE));
                addr += PGDIR_SIZE;
                pgd = pgd_offset_k(addr);
        }

        /* Descend into a real pud table for the unaligned tail. */
        if (addr < end) {
                if (pgd_none(*pgd)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pud_populate(pgd, addr, end);
        }
        return ret;
}
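
/*
 * Back [start, end) of the shadow with the shared zero page. Such ranges
 * are treated as accessible but are not tracked by kasan.
 */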
static void __init populate_zero_shadow(const void *start, const void *end)
{
        if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
                panic("kasan: unable to map zero shadow!");
}
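
/*
 * With inline instrumentation a NULL-pointer dereference or user-memory
 * access hits unmapped shadow and shows up as a general protection fault,
 * so hook the die notifier to hint at the likely cause.
 */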
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
                             unsigned long val,
                             void *data)
{
        if (val == DIE_GPF) {
                pr_emerg("CONFIG_KASAN_INLINE enabled\n");
                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
        .notifier_call = kasan_die_handler,
};
#endif
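
/*
 * Build the zero page tables and hang them off both early_level4_pgt and
 * init_level4_pgt so the whole shadow region reads as zero during boot.
 */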
void __init kasan_early_init(void)
{
        int i;
        pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
        pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
        pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

        /* Fill each zero table so that every entry points one level down. */
        for (i = 0; i < PTRS_PER_PTE; i++)
                kasan_zero_pte[i] = __pte(pte_val);

        for (i = 0; i < PTRS_PER_PMD; i++)
                kasan_zero_pmd[i] = __pmd(pmd_val);

        for (i = 0; i < PTRS_PER_PUD; i++)
                kasan_zero_pud[i] = __pud(pud_val);

        kasan_map_early_shadow(early_level4_pgt);
        kasan_map_early_shadow(init_level4_pgt);
}
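
/*
 * Replace the early zero shadow: real shadow is allocated for mapped
 * physical memory and for the kernel image, everything else keeps the
 * shared zero page, and error reporting is switched on at the end.
 */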
void __init kasan_init(void)
{
        int i;

#ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
#endif

        /* Switch to a copy of the page tables so the shadow can be rewritten. */
        memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
        load_cr3(early_level4_pgt);
        __flush_tlb_all();

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

        populate_zero_shadow((void *)KASAN_SHADOW_START,
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));

        /* Allocate real shadow for every range of mapped physical memory. */
        for (i = 0; i < E820_X_MAX; i++) {
                if (pfn_mapped[i].end == 0)
                        break;

                if (map_range(&pfn_mapped[i]))
                        panic("kasan: unable to allocate shadow!");
        }
        populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                        kasan_mem_to_shadow((void *)__START_KERNEL_map));

        /* The kernel image gets real shadow as well. */
        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
                        (unsigned long)kasan_mem_to_shadow(_end),
                        NUMA_NO_NODE);

        populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);

        /* Wipe any garbage the early instrumentation wrote to the zero page. */
        memset(kasan_zero_page, 0, PAGE_SIZE);

        load_cr3(init_level4_pgt);
        __flush_tlb_all();

        /* Setting kasan_depth to 0 turns error reporting on. */
        init_task.kasan_depth = 0;
}