// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

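/*
 * Allocate node-local, naturally aligned memory from memblock for early
 * shadow page tables. When @panic is false, a failed allocation returns
 * NULL so the caller can fall back to a smaller mapping.
 */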
static __init void *early_alloc(size_t size, int nid, bool panic)
{
	if (panic)
		return memblock_virt_alloc_try_nid(size, size,
			__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
	else
		return memblock_virt_alloc_try_nid_nopanic(size, size,
			__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
}

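/*
 * Populate one PMD worth of shadow for [addr, end). Use a 2M mapping when
 * PSE is available and the range covers a whole, aligned PMD; otherwise
 * fall back to 4K PTEs backed by early_alloc() pages.
 */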
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid, false);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid, true);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid, false);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}

static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid, true);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid, true);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}

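/*
 * Populate writable shadow memory for the virtual range [addr, end),
 * walking from the PGD down and allocating the backing pages on node @nid.
 */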
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}

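/*
 * Populate the shadow for one mapped physical memory range from
 * pfn_mapped[], allocating on the node that owns the range's first pfn.
 */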
static void __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}

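/*
 * Tear down the early (zero) shadow mappings for [start, end). The last
 * PGD entry is shared with other kernel mappings, so it is cleared one
 * P4D at a time instead of as a whole PGD.
 */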
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is nop, use p4d_clear()
		 * instead.
		 */
		if (pgtable_l5_enabled())
			pgd_clear(pgd);
		else
			p4d_clear(p4d_offset(pgd, start));
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

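/*
 * Early variant of p4d_offset(): with 4-level paging the p4d level is
 * folded into the pgd; with 5-level paging the p4d table's virtual
 * address is computed from the pgd value through the kernel text mapping
 * (__START_KERNEL_map - phys_base) rather than the direct mapping.
 */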
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;

	p4d = pgd_val(*pgd) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

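/*
 * Point every p4d entry covering [addr, end) at the shared kasan_zero_pud
 * table (and the pgd at kasan_zero_p4d if needed), so the whole shadow
 * range reads as zeros during early boot.
 */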
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

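/*
 * Wire up the zero shadow for the entire KASAN shadow region in @pgd.
 * Called for both early_top_pgt and init_top_pgt from kasan_early_init().
 */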
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

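/*
 * Build the early zero shadow: every entry of kasan_zero_pte/pmd/pud/p4d
 * points one level down toward the single kasan_zero_page, and the whole
 * shadow range is mapped through these tables in both top-level page
 * tables.
 */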
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	/* Mask out unsupported __PAGE_KERNEL bits: */
	pte_val &= __default_kernel_pte_mask;
	pmd_val &= __default_kernel_pte_mask;
	pud_val &= __default_kernel_pte_mask;
	p4d_val &= __default_kernel_pte_mask;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

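/*
 * Replace the early zero shadow with the real shadow layout: populate
 * writable shadow for mapped physical memory, the CPU entry area and the
 * kernel image, map the remaining shadow to the read-only zero page, and
 * finally clear and write-protect kasan_zero_page itself.
 */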
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As result in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share PGD with anything else.
	 * We claim whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and it collides with
	 * bunch of things like kernel code, modules, EFI mapping, etc.
	 * We need to take extra steps to not overwrite them.
	 */
	if (pgtable_l5_enabled()) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
						PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
					PAGE_SIZE);

	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_zero_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;

		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}