// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

static pgd_t tmp_pg_dir[PTRS_PER_PTE] __initdata __aligned(PAGE_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */
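
/*
 * Shadow page allocators: the "zeroed" variant returns memory that memblock
 * has already cleared, while the "raw" variant leaves initialization to the
 * caller (kasan_pte_populate() fills such pages with KASAN_SHADOW_INIT).
 */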

static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					 __pa(MAX_DMA_ADDRESS),
					 MEMBLOCK_ALLOC_NOLEAKTRACE, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
					     __pa(MAX_DMA_ADDRESS),
					     MEMBLOCK_ALLOC_NOLEAKTRACE,
					     node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}
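
/*
 * The kasan_*_offset() helpers below return the next-level table entry for
 * 'addr', populating the table first if it is empty. In the 'early' case the
 * statically allocated kasan_early_shadow_* tables are installed (via
 * __pa_symbol()); otherwise a freshly zeroed page is taken from memblock.
 */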

static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
				      bool early)
{
	if (p4d_none(READ_ONCE(*p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node,
				      bool early)
{
	if (pgd_none(READ_ONCE(*pgdp))) {
		phys_addr_t p4d_phys = early ?
				__pa_symbol(kasan_early_shadow_p4d)
					: kasan_alloc_zeroed_page(node);
		__pgd_populate(pgdp, p4d_phys, PGD_TYPE_TABLE);
	}

	return early ? p4d_offset_kimg(pgdp, addr) : p4d_offset(pgdp, addr);
}
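
/*
 * The kasan_*_populate() functions below walk [addr, end) one table level at a
 * time and install shadow mappings, ending with kasan_pte_populate(), which
 * maps either the shared zero shadow page ('early') or freshly allocated and
 * initialized shadow pages.
 */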

static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		__set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(__ptep_get(ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end && p4d_none(READ_ONCE(*p4dp)));
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}
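
/*
 * Minimum alignment required of the shadow region boundaries, checked by the
 * BUILD_BUG_ON()s in kasan_early_init() below.
 */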
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS > 4
#define SHADOW_ALIGN	P4D_SIZE
#else
#define SHADOW_ALIGN	PUD_SIZE
#endif

/*
 * Return whether 'addr' is aligned to the size covered by a root level
 * descriptor.
 */
static bool __init root_level_aligned(u64 addr)
{
	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 1) * (PAGE_SHIFT - 3);

	return (addr % (PAGE_SIZE << shift)) == 0;
}

/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), SHADOW_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), SHADOW_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, SHADOW_ALIGN));

	if (!root_level_aligned(KASAN_SHADOW_START)) {
		/*
		 * The start address is misaligned, and so the next level table
		 * will be shared with the linear region. This can happen with
		 * 4 or 5 level paging, so install a generic pte_t[] as the
		 * next level. This prevents the kasan_pgd_populate call below
		 * from inserting an entry that refers to the shared KASAN zero
		 * shadow pud_t[]/p4d_t[], which could end up getting corrupted
		 * when the linear region is mapped.
		 */
		static pte_t tbl[PTRS_PER_PTE] __page_aligned_bss;
		pgd_t *pgdp = pgd_offset_k(KASAN_SHADOW_START);

		set_pgd(pgdp, __pgd(__pa_symbol(tbl) | PGD_TYPE_TABLE));
	}

	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Return the descriptor index of 'addr' in the root level table
 */
static int __init root_level_idx(u64 addr)
{
	/*
	 * On 64k pages, the TTBR1 range root tables are extended for 52-bit
	 * virtual addressing, and TTBR1 will simply point to the pgd_t entry
	 * that covers the start of the 48-bit addressable VA space if LVA is
	 * not implemented. This means we need to index the table as usual,
	 * instead of masking off bits based on vabits_actual.
	 */
	u64 vabits = IS_ENABLED(CONFIG_ARM64_64K_PAGES) ? VA_BITS
							: vabits_actual;
	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits) - 1) * (PAGE_SHIFT - 3);

	return (addr & ~_PAGE_OFFSET(vabits)) >> (shift + PAGE_SHIFT);
}

/*
 * Clone a next level table from swapper_pg_dir into tmp_pg_dir
 */
static void __init clone_next_level(u64 addr, pgd_t *tmp_pg_dir, pud_t *pud)
{
	int idx = root_level_idx(addr);
	pgd_t pgd = READ_ONCE(swapper_pg_dir[idx]);
	pud_t *pudp = (pud_t *)__phys_to_kimg(__pgd_to_phys(pgd));

	memcpy(pud, pudp, PAGE_SIZE);
	tmp_pg_dir[idx] = __pgd(__phys_to_pgd_val(__pa_symbol(pud)) |
				PUD_TYPE_TABLE);
}

/*
 * Return the descriptor index of 'addr' in the next level table
 */
static int __init next_level_idx(u64 addr)
{
	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 2) * (PAGE_SHIFT - 3);

	return (addr >> (shift + PAGE_SHIFT)) % PTRS_PER_PTE;
}

/*
 * Dereference the table descriptor at 'pgd_idx' and clear the entries from
 * 'start' to 'end' (exclusive) from the table.
 */
static void __init clear_next_level(int pgd_idx, int start, int end)
{
	pgd_t pgd = READ_ONCE(swapper_pg_dir[pgd_idx]);
	pud_t *pudp = (pud_t *)__phys_to_kimg(__pgd_to_phys(pgd));

	memset(&pudp[start], 0, (end - start) * sizeof(pud_t));
}

static void __init clear_shadow(u64 start, u64 end)
{
	int l = root_level_idx(start), m = root_level_idx(end);

	if (!root_level_aligned(start))
		clear_next_level(l++, next_level_idx(start), PTRS_PER_PTE);
	if (!root_level_aligned(end))
		clear_next_level(m, 0, next_level_idx(end));
	memset(&swapper_pg_dir[l], 0, (m - l) * sizeof(pgd_t));
}
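
/*
 * Replace the early KASAN shadow (a single zero page mapped everywhere) with
 * real shadow memory for the kernel image and all memblock memory ranges,
 * keeping the shared zero shadow for the regions in between.
 */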
static void __init kasan_init_shadow(void)
{
	static pud_t pud[2][PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start;
	u64 vmalloc_shadow_end;
	phys_addr_t pa_start, pa_end;
	u64 i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);

	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * At first we should unmap early shadow (clear_pgds() call below).
	 * However, instrumented code couldn't execute without shadow memory.
	 * tmp_pg_dir used to keep early shadow mapped until full shadow
	 * setup will be finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));

	/*
	 * If the start or end address of the shadow region is not aligned to
	 * the root level size, we have to allocate a temporary next-level table
	 * in each case, clone the next level of descriptors, and install the
	 * table into tmp_pg_dir. Note that with 5 levels of paging, the next
	 * level will in fact be p4d_t, but that makes no difference in this
	 * case.
	 */
	if (!root_level_aligned(KASAN_SHADOW_START))
		clone_next_level(KASAN_SHADOW_START, tmp_pg_dir, pud[0]);
	if (!root_level_aligned(KASAN_SHADOW_END))
		clone_next_level(KASAN_SHADOW_END, tmp_pg_dir, pud[1]);
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_shadow(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
				    (void *)mod_shadow_start);

	BUILD_BUG_ON(VMALLOC_START != MODULES_END);
	kasan_populate_early_shadow((void *)vmalloc_shadow_end,
				    (void *)KASAN_SHADOW_END);

	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)__phys_to_virt(pa_start);
		void *end = (void *)__phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KAsan may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		__set_pte(&kasan_early_shadow_pte[i],
			  pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				  PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
}
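
/*
 * The early boot code runs with KASAN reporting suppressed
 * (init_task.kasan_depth is statically set to 1); clearing it here enables
 * reports now that the real shadow is in place.
 */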
static void __init kasan_init_depth(void)
{
	init_task.kasan_depth = 0;
}
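
/*
 * With CONFIG_KASAN_VMALLOC, VM areas registered before kasan_init() (via
 * vm_area_register_early()) need their shadow populated explicitly.
 */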
#ifdef CONFIG_KASAN_VMALLOC
void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
{
	unsigned long shadow_start, shadow_end;

	if (!is_vmalloc_or_module_addr(start))
		return;

	shadow_start = (unsigned long)kasan_mem_to_shadow(start);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow(start + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
	kasan_map_populate(shadow_start, shadow_end, NUMA_NO_NODE);
}
#endif

void __init kasan_init(void)
{
	kasan_init_shadow();
	kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
	/*
	 * Generic KASAN is now fully initialized.
	 * Software and Hardware Tag-Based modes still require
	 * kasan_init_sw_tags() and kasan_init_hw_tags() correspondingly.
	 */
	pr_info("KernelAddressSanitizer initialized (generic)\n");
#endif
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */