/* arch/x86/mm/kasan_init_64.c (Linux 4.2.2) */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;

/*
 * This page is used as the early shadow. We don't use empty_zero_page
 * at early stages because stack instrumentation could write some garbage
 * to this page.
 * Later we reuse it as the zero shadow for large ranges of memory that
 * are allowed to be accessed but are not instrumented by kasan
 * (vmalloc/vmemmap ...).
 */
static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
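
/*
 * Map real (writable) shadow memory for one range of mapped physical
 * frames. vmemmap_populate() allocates the backing pages and builds the
 * page tables for the corresponding shadow addresses.
 */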
static int __init map_range(struct range *range)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
        end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

        /*
         * end + 1 here is intentional. We check several shadow bytes in
         * advance to slightly speed up the fastpath. In some rare cases we
         * could cross the boundary of mapped shadow, so we just map some
         * more here.
         */
        return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}
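
/*
 * Remove the early shadow mappings for [start, end) at the PGD level so
 * the region can be repopulated with the final shadow page tables.
 */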
static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        for (; start < end; start += PGDIR_SIZE)
                pgd_clear(pgd_offset_k(start));
}
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
        int i;
        unsigned long start = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;

        for (i = pgd_index(start); start < end; i++) {
                pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
                                | _KERNPG_TABLE);
                start += PGDIR_SIZE;
        }
}
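
/*
 * The zero_*_populate() helpers walk the page tables for [addr, end) and
 * point every level at the shared zero tables, so that each shadow page
 * in the range maps the single read-only kasan_zero_page. Entries that
 * are only partially covered get a freshly allocated lower-level table
 * and the walk recurses into it.
 */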
static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
                                unsigned long end)
{
        pte_t *pte = pte_offset_kernel(pmd, addr);

        while (addr + PAGE_SIZE <= end) {
                WARN_ON(!pte_none(*pte));
                set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
                                        | __PAGE_KERNEL_RO));
                addr += PAGE_SIZE;
                pte = pte_offset_kernel(pmd, addr);
        }
        return 0;
}
static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
                                unsigned long end)
{
        int ret = 0;
        pmd_t *pmd = pmd_offset(pud, addr);

        while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
                WARN_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
                                        | _KERNPG_TABLE));
                addr += PMD_SIZE;
                pmd = pmd_offset(pud, addr);
        }

        if (addr < end) {
                if (pmd_none(*pmd)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pte_populate(pmd, addr, end);
        }

        return ret;
}
static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
                                unsigned long end)
{
        int ret = 0;
        pud_t *pud = pud_offset(pgd, addr);

        while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
                WARN_ON(!pud_none(*pud));
                set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
                                        | _KERNPG_TABLE));
                addr += PUD_SIZE;
                pud = pud_offset(pgd, addr);
        }

        if (addr < end) {
                if (pud_none(*pud)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pmd_populate(pud, addr, end);
        }

        return ret;
}
static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
{
        int ret = 0;
        pgd_t *pgd = pgd_offset_k(addr);

        while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
                WARN_ON(!pgd_none(*pgd));
                set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
                                        | _KERNPG_TABLE));
                addr += PGDIR_SIZE;
                pgd = pgd_offset_k(addr);
        }

        if (addr < end) {
                if (pgd_none(*pgd)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pud_populate(pgd, addr, end);
        }

        return ret;
}
static void __init populate_zero_shadow(const void *start, const void *end)
{
        if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
                panic("kasan: unable to map zero shadow!");
}
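
/*
 * With inline instrumentation a buggy shadow access shows up as a general
 * protection fault, so hook the die notifier chain and print a hint that
 * the GPF may really be a NULL-ptr dereference or a user memory access.
 */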
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
                             unsigned long val,
                             void *data)
{
        if (val == DIE_GPF) {
                pr_emerg("CONFIG_KASAN_INLINE enabled\n");
                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
        .notifier_call = kasan_die_handler,
};
#endif
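
/*
 * Build the early shadow: every PTE in kasan_zero_pte points at
 * kasan_zero_page, every PMD entry at kasan_zero_pte, every PUD entry at
 * kasan_zero_pmd, and every PGD slot of the shadow region at
 * kasan_zero_pud. The whole shadow therefore reads as zero before
 * kasan_init() installs the real mappings.
 */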
void __init kasan_early_init(void)
{
        int i;
        pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
        pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
        pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

        for (i = 0; i < PTRS_PER_PTE; i++)
                kasan_zero_pte[i] = __pte(pte_val);

        for (i = 0; i < PTRS_PER_PMD; i++)
                kasan_zero_pmd[i] = __pmd(pmd_val);

        for (i = 0; i < PTRS_PER_PUD; i++)
                kasan_zero_pud[i] = __pud(pud_val);

        kasan_map_early_shadow(early_level4_pgt);
        kasan_map_early_shadow(init_level4_pgt);
}
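
/*
 * Switch to the final shadow layout: the shadow of the physical mapping
 * (PAGE_OFFSET) and of the kernel image gets real, writable pages via
 * map_range()/vmemmap_populate(), while everything else in the shadow
 * region stays backed read-only by kasan_zero_page. One shadow byte
 * covers eight bytes of kernel address space (see kasan_mem_to_shadow()).
 */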
void __init kasan_init(void)
{
        int i;

#ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
#endif

        memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
        load_cr3(early_level4_pgt);
        __flush_tlb_all();

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

        populate_zero_shadow((void *)KASAN_SHADOW_START,
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));

        for (i = 0; i < E820_X_MAX; i++) {
                if (pfn_mapped[i].end == 0)
                        break;

                if (map_range(&pfn_mapped[i]))
                        panic("kasan: unable to allocate shadow!");
        }

        populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                        kasan_mem_to_shadow((void *)__START_KERNEL_map));

        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
                        (unsigned long)kasan_mem_to_shadow(_end),
                        NUMA_NO_NODE);

        populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);

        memset(kasan_zero_page, 0, PAGE_SIZE);

        load_cr3(init_level4_pgt);
        __flush_tlb_all();
        init_task.kasan_depth = 0;

        pr_info("Kernel address sanitizer initialized\n");
}