/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
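
/*
 * tmp_pg_dir temporarily carries a copy of swapper_pg_dir so that the early
 * (zero page) shadow stays mapped while kasan_init() tears it down and
 * installs the real shadow mappings.
 */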
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */

static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
                                        unsigned long end)
{
        pte_t *pte;
        unsigned long next;

        if (pmd_none(*pmd))
                __pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);

        pte = pte_offset_kimg(pmd, addr);
        do {
                next = addr + PAGE_SIZE;
                set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
                                        PAGE_KERNEL));
        } while (pte++, addr = next, addr != end && pte_none(*pte));
}

static void __init kasan_early_pmd_populate(pud_t *pud,
                                        unsigned long addr,
                                        unsigned long end)
{
        pmd_t *pmd;
        unsigned long next;

        if (pud_none(*pud))
                __pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);

        pmd = pmd_offset_kimg(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                kasan_early_pte_populate(pmd, addr, next);
        } while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}

static void __init kasan_early_pud_populate(pgd_t *pgd,
                                        unsigned long addr,
                                        unsigned long end)
{
        pud_t *pud;
        unsigned long next;

        if (pgd_none(*pgd))
                __pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);

        pud = pud_offset_kimg(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                kasan_early_pmd_populate(pud, addr, next);
        } while (pud++, addr = next, addr != end && pud_none(*pud));
}
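
/*
 * Map the whole shadow region [KASAN_SHADOW_START, KASAN_SHADOW_END) to the
 * single zero page, walking the swapper page tables via pgd_offset_k().
 */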
static void __init kasan_map_early_shadow(void)
{
        unsigned long addr = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;
        unsigned long next;
        pgd_t *pgd;

        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                kasan_early_pud_populate(pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
}
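
/*
 * KASAN maps each 8 bytes of memory to one shadow byte:
 *
 *     shadow = (addr >> 3) + KASAN_SHADOW_OFFSET
 *
 * so the shadow of the very top of the address space (1UL << 64) must land
 * exactly on KASAN_SHADOW_END, which is what the first BUILD_BUG_ON below
 * enforces: KASAN_SHADOW_OFFSET == KASAN_SHADOW_END - (1UL << (64 - 3)).
 */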
asmlinkage void __init kasan_early_init(void)
{
        BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
        kasan_map_early_shadow();
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
        pgd_t *pgd, *pgd_new, *pgd_end;

        pgd = pgd_offset_k(KASAN_SHADOW_START);
        pgd_end = pgd_offset_k(KASAN_SHADOW_END);
        pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
        do {
                set_pgd(pgd_new, *pgd);
        } while (pgd++, pgd_new++, pgd != pgd_end);
}

static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        /*
         * Remove references to kasan page tables from
         * swapper_pg_dir. pgd_clear() can't be used
         * here because it's a no-op on 2- and 3-level page table setups.
         */
        for (; start < end; start += PGDIR_SIZE)
                set_pgd(pgd_offset_k(start), __pgd(0));
}
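
/*
 * Replace the early zero shadow with properly backed shadow mappings and then
 * switch back to swapper_pg_dir.
 */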
void __init kasan_init(void)
{
        u64 kimg_shadow_start, kimg_shadow_end;
        u64 mod_shadow_start, mod_shadow_end;
        struct memblock_region *reg;
        int i;

        kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
        kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

        mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
        mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

        /*
         * We are going to perform proper setup of shadow memory.
         * First we should unmap the early shadow (see the clear_pgds() call
         * below). However, instrumented code can't execute without shadow
         * memory, so tmp_pg_dir is used to keep the early shadow mapped until
         * the full shadow setup is finished.
         */
        memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
        dsb(ishst);
        cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
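
        /* Back the shadow of the kernel image with real memory. */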
        vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
                         pfn_to_nid(virt_to_pfn(lm_alias(_text))));

        /*
         * vmemmap_populate() has populated the shadow region that covers the
         * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
         * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
         * kasan_populate_zero_shadow() from replacing the page table entries
         * (PMD or PTE) at the edges of the shadow region for the kernel
         * image.
         */
        kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
        kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);

        kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
                                   (void *)mod_shadow_start);
        kasan_populate_zero_shadow((void *)kimg_shadow_end,
                                   kasan_mem_to_shadow((void *)PAGE_OFFSET));
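
        /*
         * If the kernel image shadow sits above the module shadow, fill the
         * gap between the two with the zero shadow as well.
         */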
        if (kimg_shadow_start > mod_shadow_end)
                kasan_populate_zero_shadow((void *)mod_shadow_end,
                                           (void *)kimg_shadow_start);
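
        /* Back the shadow of all memblock memory (the linear map) with real pages. */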
        for_each_memblock(memory, reg) {
                void *start = (void *)__phys_to_virt(reg->base);
                void *end = (void *)__phys_to_virt(reg->base + reg->size);

                if (start >= end)
                        break;

                vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
                                 (unsigned long)kasan_mem_to_shadow(end),
                                 pfn_to_nid(virt_to_pfn(start)));
        }

        /*
         * KAsan may reuse the contents of kasan_zero_pte directly, so we
         * should make sure that it maps the zero page read-only.
         */
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_zero_pte[i],
                        pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));

        memset(kasan_zero_page, 0, PAGE_SIZE);
        cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized\n");
}