/*
 * This file contains some kasan initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory: the entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as the zero shadow to cover large ranges of
 *     memory that are allowed to be accessed but are not handled by
 *     kasan (vmalloc/vmemmap ...).
 */
unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
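
/*
 * Page-table levels dedicated to the zero shadow: when a whole
 * PGDIR/PUD/PMD-sized piece of the shadow region needs only zero shadow,
 * the corresponding upper-level entries are pointed at these shared
 * tables instead of allocating fresh ones.
 */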
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
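
/*
 * Boot-time allocator for page tables: returns zeroed memblock memory,
 * preferring physical addresses above MAX_DMA_ADDRESS on the given node.
 */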
static __init void *early_alloc(size_t size, int node)
{
	return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					   BOOTMEM_ALLOC_ACCESSIBLE, node);
}
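
/*
 * Map every page of the shadow range [addr, end) to the single,
 * write-protected kasan_zero_page.
 */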
static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}
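
/*
 * Fill the PMD entries for [addr, end): PMD-aligned, PMD-sized chunks get
 * the shared kasan_zero_pte table; partially covered chunks get a freshly
 * allocated PTE page, filled in by zero_pte_populate().
 */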
static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
			continue;
		}

		if (pmd_none(*pmd)) {
			pmd_populate_kernel(&init_mm, pmd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}
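
/*
 * Same idea one level up: fully covered PUD-sized chunks reuse the shared
 * kasan_zero_pmd (and kasan_zero_pte) tables; partially covered chunks
 * recurse into zero_pmd_populate().
 */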
static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pud_populate(&init_mm, pud,
				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

/**
 * kasan_populate_zero_shadow - populate shadow memory region with
 *                              kasan_zero_page
 * @shadow_start - start of the memory range to populate
 * @shadow_end   - end of the memory range to populate
 */
void __init kasan_populate_zero_shadow(const void *shadow_start,
				const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_zero_pud should already be populated with
			 * pmds at this point.
			 * The [pud,pmd]_populate*() calls below are needed
			 * only for 3- and 2-level page tables, where we
			 * don't have real puds/pmds; there, pgd_populate()
			 * and pud_populate() are no-ops.
			 */
			pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_pud));
			pud = pud_offset(pgd, addr);
			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
			continue;
		}

		if (pgd_none(*pgd)) {
			pgd_populate(&init_mm, pgd,
				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
		}
		zero_pud_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}