/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
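
/*
 * Temporary page-table directory: it keeps the early KASAN shadow mapped
 * while kasan_init() tears down and rebuilds the real shadow mappings in
 * swapper_pg_dir.
 */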
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */
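
/* Allocate one zeroed page from memblock for a shadow page or page table. */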
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					      __pa(MAX_DMA_ADDRESS),
					      MEMBLOCK_ALLOC_ACCESSIBLE, node);
	return __pa(p);
}
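
/*
 * kasan_p*d_offset(): return the next-level shadow table entry for addr,
 * populating the current entry first if it is empty. Early in boot the
 * statically allocated kasan_zero_* tables are installed via __pa_symbol();
 * once the memblock allocator is usable, fresh zeroed pages are allocated
 * on the requested node instead.
 */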
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
					     : kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
					     : kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
				      bool early)
{
	if (pgd_none(READ_ONCE(*pgdp))) {
		phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
					     : kasan_alloc_zeroed_page(node);
		__pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
}
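
/*
 * kasan_p*d_populate(): walk the shadow range [addr, end) one table level at
 * a time, mapping every page to either the shared zero page (early) or a
 * freshly allocated zeroed page, and stopping once the end of the range or
 * an already-populated entry is reached.
 */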
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
					      : kasan_alloc_zeroed_page(node);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_pud_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it's a no-op on 2- and 3-level page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we must unmap the early shadow (the clear_pgds() call below).
	 * However, instrumented code can't execute without shadow memory, so
	 * tmp_pg_dir is used to keep the early shadow mapped until the full
	 * shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   pfn_to_nid(virt_to_pfn(lm_alias(_text))));

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));
	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KAsan may reuse the contents of kasan_zero_pte directly, so we
	 * should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));

	memset(kasan_zero_page, 0, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}