/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

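/*
 * The kasan_early_*_populate() helpers below walk the shadow address range
 * one page-table level at a time and point every entry at the shared
 * kasan_zero_* tables, so that the whole early shadow reads as zeroes
 * ("no poison") until kasan_init() builds the real shadow.
 */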
static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
					unsigned long end)
{
	pte_t *pte;
	unsigned long next;

	if (pmd_none(*pmd))
		pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);

	pte = pte_offset_kimg(pmd, addr);
	do {
		next = addr + PAGE_SIZE;
		set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
					PAGE_KERNEL));
		/* Stop at 'end' or at the first already-populated entry. */
	} while (pte++, addr = next, addr != end && pte_none(*pte));
}

static void __init kasan_early_pmd_populate(pud_t *pud,
					unsigned long addr,
					unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud))
		pud_populate(&init_mm, pud, kasan_zero_pmd);

	pmd = pmd_offset_kimg(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		kasan_early_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}

static void __init kasan_early_pud_populate(pgd_t *pgd,
					unsigned long addr,
					unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd))
		pgd_populate(&init_mm, pgd, kasan_zero_pud);

	pud = pud_offset_kimg(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		kasan_early_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end && pud_none(*pud));
}

static void __init kasan_map_early_shadow(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_pud_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

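/*
 * Each byte of shadow covers eight bytes of memory:
 * shadow(addr) = (addr >> 3) + KASAN_SHADOW_OFFSET. The first BUILD_BUG_ON
 * below checks that the offset was chosen so that the shadow of the very top
 * of the address space (2^64 >> 3 == 1UL << 61) lands exactly at
 * KASAN_SHADOW_END.
 */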
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_map_early_shadow();
}

/*
 * Copy the current shadow region into a new pgdir. Only the pgd-level
 * entries are copied, so the new pgdir shares the lower-level shadow
 * tables with swapper_pg_dir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgd, *pgd_new, *pgd_end;

	pgd = pgd_offset_k(KASAN_SHADOW_START);
	pgd_end = pgd_offset_k(KASAN_SHADOW_END);
	pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgd_new, *pgd);
	} while (pgd++, pgd_new++, pgd != pgd_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it's a nop on 2- and 3-level pagetable setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

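/*
 * kasan_init() replaces the early (zero) shadow with the real one:
 *  1. switch TTBR1 to tmp_pg_dir so the early shadow stays mapped,
 *  2. clear the shadow pgd entries in swapper_pg_dir,
 *  3. back the shadow of the kernel image and of all RAM with real pages
 *     (vmemmap_populate) and map everything else to the zero page,
 *  4. switch back to swapper_pg_dir and enable KASAN reports.
 */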
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
	kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we must unmap the early shadow (the clear_pgds() call below),
	 * but instrumented code can't execute without shadow memory, so
	 * tmp_pg_dir is used to keep the early shadow mapped until the full
	 * shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(tmp_pg_dir);

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
			 pfn_to_nid(virt_to_pfn(_text)));

	/*
	 * vmemmap_populate() has populated the shadow region that covers the
	 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
	 * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to
	 * prevent kasan_populate_zero_shadow() from replacing the page table
	 * entries (PMD or PTE) at the edges of the shadow region for the
	 * kernel image.
	 */
	kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
	kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);

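	/*
	 * Everything outside the module and image shadow is mapped to the
	 * shared zero page: [KASAN_SHADOW_START, shadow(MODULES_VADDR)),
	 * the gap between the module and image shadow (if any), and
	 * [rounded shadow of _end, shadow(PAGE_OFFSET)).
	 */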
	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);

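	/*
	 * The shadow of actual RAM must be writable, so it is backed with
	 * real pages by vmemmap_populate() for each memblock region rather
	 * than with the shared zero page.
	 */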
	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional: we check several shadow bytes
		 * in advance to slightly speed up the fast path. In some rare
		 * cases that check could cross the boundary of the mapped
		 * shadow, so map a little extra here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				 (unsigned long)kasan_mem_to_shadow(end) + 1,
				 pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KASAN may reuse the contents of kasan_zero_pte directly, so we
	 * should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));

	memset(kasan_zero_page, 0, PAGE_SIZE);
	cpu_replace_ttbr1(swapper_pg_dir);

	/* At this point kasan is fully initialized. Enable error messages. */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}