// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM.
 *
 * Copyright (c) 2018 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 * Author: Linus Walleij <linus.walleij@linaro.org>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/start_kernel.h>
#include <linux/pgtable.h>
#include <asm/cputype.h>
#include <asm/highmem.h>
#include <asm/mach/map.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/procinfo.h>
#include <asm/proc-fns.h>

#include "mm.h"
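
/*
 * tmp_pgd_table holds a transient copy of swapper_pg_dir while the real
 * shadow tables are rebuilt in kasan_init(); on LPAE, tmp_pmd_table
 * additionally preserves the PMD level covering the shadow region.
 */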
static pgd_t tmp_pgd_table[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
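
/*
 * Allocate a naturally aligned block of shadow memory from memblock,
 * placed above the DMA limit. Callers treat a NULL return as fatal:
 * KASan cannot operate without backing shadow memory.
 */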
static __init void *kasan_alloc_block(size_t size)
{
        return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
                                      MEMBLOCK_ALLOC_KASAN, NUMA_NO_NODE);
}
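
/*
 * Populate the PTE level of the shadow tables for [addr, end). In the
 * early pass every entry is pointed at the shared scratch page; in the
 * final pass each entry gets its own freshly allocated shadow page.
 */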
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
                                      unsigned long end, bool early)
{
        unsigned long next;
        pte_t *ptep = pte_offset_kernel(pmdp, addr);

        do {
                pte_t entry;
                void *p;

                next = addr + PAGE_SIZE;

                if (!early) {
                        if (!pte_none(READ_ONCE(*ptep)))
                                continue;

                        p = kasan_alloc_block(PAGE_SIZE);
                        if (!p) {
                                panic("%s failed to allocate shadow page for address 0x%lx\n",
                                      __func__, addr);
                                return;
                        }
                        memset(p, KASAN_SHADOW_INIT, PAGE_SIZE);
                        entry = pfn_pte(virt_to_pfn(p),
                                        __pgprot(pgprot_val(PAGE_KERNEL)));
                } else if (pte_none(READ_ONCE(*ptep))) {
                        /*
                         * The early shadow memory is mapping all KASan
                         * operations to one and the same page in memory,
                         * "kasan_early_shadow_page" so that the instrumentation
                         * will work on a scratch area until we can set up the
                         * proper KASan shadow memory.
                         */
                        entry = pfn_pte(virt_to_pfn(kasan_early_shadow_page),
                                        __pgprot(_L_PTE_DEFAULT | L_PTE_DIRTY | L_PTE_XN));
                } else {
                        /*
                         * Early shadow mappings are PMD_SIZE aligned, so if the
                         * first entry is already set, they must all be set.
                         */
                        return;
                }

                set_pte_at(&init_mm, addr, ptep, entry);
        } while (ptep++, addr = next, addr != end);
}

/*
 * The pmd (page middle directory) is only used on LPAE
 */
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
                                      unsigned long end, bool early)
{
        unsigned long next;
        pmd_t *pmdp = pmd_offset(pudp, addr);

        do {
                if (pmd_none(*pmdp)) {
                        /*
                         * We attempt to allocate a shadow block for the PMDs
                         * used by the PTEs for this address if it isn't already
                         * allocated.
                         */
                        void *p = early ? kasan_early_shadow_pte :
                                kasan_alloc_block(PAGE_SIZE);

                        if (!p) {
                                panic("%s failed to allocate shadow block for address 0x%lx\n",
                                      __func__, addr);
                                return;
                        }
                        pmd_populate_kernel(&init_mm, pmdp, p);
                        flush_pmd_entry(pmdp);
                }

                next = pmd_addr_end(addr, end);
                kasan_pte_populate(pmdp, addr, next, early);
        } while (pmdp++, addr = next, addr != end);
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
                                      bool early)
{
        unsigned long next;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;

        pgdp = pgd_offset_k(addr);

        do {
                /*
                 * Allocate and populate the shadow block of p4d folded into
                 * pud folded into pmd if it doesn't already exist
                 */
                if (!early && pgd_none(*pgdp)) {
                        void *p = kasan_alloc_block(PAGE_SIZE);

                        if (!p) {
                                panic("%s failed to allocate shadow block for address 0x%lx\n",
                                      __func__, addr);
                                return;
                        }
                        pgd_populate(&init_mm, pgdp, p);
                }

                next = pgd_addr_end(addr, end);
                /*
                 * We just immediately jump over the p4d and pud page
                 * directories since we believe ARM32 will never gain four
                 * nor five level page tables.
                 */
                p4dp = p4d_offset(pgdp, addr);
                pudp = pud_offset(p4dp, addr);

                kasan_pmd_populate(pudp, addr, next, early);
        } while (pgdp++, addr = next, addr != end);
}

extern struct proc_info_list *lookup_processor_type(unsigned int);
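
/*
 * Called early in boot to point all of the shadow region at the single
 * scratch page, so that instrumented code can run before the real
 * shadow memory has been allocated.
 */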
void __init kasan_early_init(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types. The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (list) {
#ifdef MULTI_CPU
                processor = *list->proc;
#endif
        }
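
        /*
         * With a shadow scale of 1/8, the shadow of the entire 4 GiB
         * address space is 512 MiB (1 << 29), so KASAN_SHADOW_OFFSET must
         * sit exactly that far below KASAN_SHADOW_END.
         */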
        BUILD_BUG_ON((KASAN_SHADOW_END - (1UL << 29)) != KASAN_SHADOW_OFFSET);

        /*
         * We walk the page table and set all of the shadow memory to point
         * to the scratch page.
         */
        kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, true);
}
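
/*
 * Unmap the early shadow by clearing the PMD entries that cover the
 * shadow region; the shared early shadow page tables themselves are
 * static and are not freed.
 */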
static void __init clear_pgds(unsigned long start,
                              unsigned long end)
{
        for (; start && start < end; start += PMD_SIZE)
                pmd_clear(pmd_off_k(start));
}
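
/*
 * Map real shadow memory for a block of kernel virtual addresses by
 * translating [start, end) to its shadow range and populating it.
 */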
static int __init create_mapping(void *start, void *end)
{
        void *shadow_start, *shadow_end;

        shadow_start = kasan_mem_to_shadow(start);
        shadow_end = kasan_mem_to_shadow(end);

        pr_info("Mapping kernel virtual memory block: %px-%px at shadow: %px-%px\n",
                start, end, shadow_start, shadow_end);

        kasan_pgd_populate((unsigned long)shadow_start & PAGE_MASK,
                           PAGE_ALIGN((unsigned long)shadow_end), false);
        return 0;
}

void __init kasan_init(void)
{
        phys_addr_t pa_start, pa_end;
        u64 i;

        /*
         * We are going to perform proper setup of shadow memory.
         *
         * At first we should unmap early shadow (clear_pgds() call below).
         * However, instrumented code can't execute without shadow memory.
         *
         * To keep the early shadow memory MMU tables around while setting up
         * the proper shadow memory, we copy swapper_pg_dir (the initial page
         * table) to tmp_pgd_table and use that to keep the early shadow memory
         * mapped until the full shadow setup is finished. Then we swap back
         * to the proper swapper_pg_dir.
         */
        memcpy(tmp_pgd_table, swapper_pg_dir, sizeof(tmp_pgd_table));
#ifdef CONFIG_ARM_LPAE
        /* We need to be in the same PGD or this won't work */
        BUILD_BUG_ON(pgd_index(KASAN_SHADOW_START) !=
                     pgd_index(KASAN_SHADOW_END));
        memcpy(tmp_pmd_table,
               pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)),
               sizeof(tmp_pmd_table));
        set_pgd(&tmp_pgd_table[pgd_index(KASAN_SHADOW_START)],
                __pgd(__pa(tmp_pmd_table) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
#endif
        cpu_switch_mm(tmp_pgd_table, &init_mm);
        local_flush_tlb_all();
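
        /*
         * From here on we run on the temporary tables, so the shadow
         * entries in swapper_pg_dir can be torn down and rebuilt.
         */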
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
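
        /*
         * Map the shadow of everything above VMALLOC_START (vmalloc,
         * fixmap and the rest of the upper address space) to the shared
         * early shadow pages.
         */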
        kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
                                    kasan_mem_to_shadow((void *)-1UL) + 1);

        for_each_mem_range(i, &pa_start, &pa_end) {
                void *start = __va(pa_start);
                void *end = __va(pa_end);

                /* Do not attempt to shadow highmem */
                if (pa_start >= arm_lowmem_limit) {
                        pr_info("Skip highmem block at %pa-%pa\n", &pa_start, &pa_end);
                        continue;
                }
                if (pa_end > arm_lowmem_limit) {
                        pr_info("Truncating shadow for memory block at %pa-%pa to lowmem region at %pa\n",
                                &pa_start, &pa_end, &arm_lowmem_limit);
                        end = __va(arm_lowmem_limit);
                }
                if (start >= end) {
                        pr_info("Skipping invalid memory block %pa-%pa (virtual %p-%p)\n",
                                &pa_start, &pa_end, start, end);
                        continue;
                }

                create_mapping(start, end);
        }

        /*
         * 1. The module global variables are in MODULES_VADDR ~ MODULES_END,
         *    so we need to map this area.
         * 2. The shadow of PKMAP_BASE ~ PKMAP_BASE+PMD_SIZE and the shadow
         *    of MODULES_VADDR ~ MODULES_END fall in the same PMD_SIZE
         *    region, so we can't use kasan_populate_zero_shadow.
         */
        create_mapping((void *)MODULES_VADDR, (void *)(PKMAP_BASE + PMD_SIZE));

        /*
         * KAsan may reuse the contents of kasan_early_shadow_pte directly, so
         * we should make sure that it maps the zero page read-only.
         */
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte_at(&init_mm, KASAN_SHADOW_START + i*PAGE_SIZE,
                           &kasan_early_shadow_pte[i],
                           pfn_pte(virt_to_pfn(kasan_early_shadow_page),
                                   __pgprot(pgprot_val(PAGE_KERNEL)
                                            | L_PTE_RDONLY)));
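
        /* The real shadow is in place; switch back to swapper_pg_dir */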
        cpu_switch_mm(swapper_pg_dir, &init_mm);
        local_flush_tlb_all();
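
        /*
         * The scratch page took arbitrary writes while it served as the
         * universal shadow; wipe it so the regions still mapped to it
         * read as zero ("fully accessible") shadow.
         */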
        memset(kasan_early_shadow_page, 0, PAGE_SIZE);
        pr_info("Kernel address sanitizer initialized\n");
        init_task.kasan_depth = 0;
}