// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>

#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm-generic/sections.h>

static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
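
/*
 * Helpers to test whether a shadow page table entry is still "empty".
 * In the early stage the shadow tables are zero-filled, so an entry is
 * empty when its value is 0; later on, an entry counts as empty while it
 * still points at the shared kasan_early_shadow_* tables/page.
 */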
#ifdef __PAGETABLE_P4D_FOLDED
#define __pgd_none(early, pgd) (0)
#else
#define __pgd_none(early, pgd) (early ? (pgd_val(pgd) == 0) : \
(__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d)))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define __p4d_none(early, p4d) (0)
#else
#define __p4d_none(early, p4d) (early ? (p4d_val(p4d) == 0) : \
(__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
#endif

#ifdef __PAGETABLE_PMD_FOLDED
#define __pud_none(early, pud) (0)
#else
#define __pud_none(early, pud) (early ? (pud_val(pud) == 0) : \
(__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd)))
#endif

#define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \
(__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))

#define __pte_none(early, pte) (early ? pte_none(pte) : \
((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))
bool kasan_early_stage = true;
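
/*
 * Translate a kernel address to its shadow address. Each LoongArch address
 * window (XKPRANGE cached/uncached/write-combined and XKVRANGE) has its own
 * shadow offset; before KASAN is ready everything maps to the early shadow page.
 */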
void *kasan_mem_to_shadow(const void *addr)
{
	if (!kasan_arch_is_ready()) {
		return (void *)(kasan_early_shadow_page);
	} else {
		unsigned long maddr = (unsigned long)addr;
		unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
		unsigned long offset = 0;

		if (maddr >= FIXADDR_START)
			return (void *)(kasan_early_shadow_page);

		maddr &= XRANGE_SHADOW_MASK;
		switch (xrange) {
		case XKPRANGE_CC_SEG:
			offset = XKPRANGE_CC_SHADOW_OFFSET;
			break;
		case XKPRANGE_UC_SEG:
			offset = XKPRANGE_UC_SHADOW_OFFSET;
			break;
		case XKPRANGE_WC_SEG:
			offset = XKPRANGE_WC_SHADOW_OFFSET;
			break;
		case XKVRANGE_VC_SEG:
			offset = XKVRANGE_VC_SHADOW_OFFSET;
			break;
		default:
			WARN_ON(1);
			return NULL;
		}

		return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
	}
}
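
/* Inverse of kasan_mem_to_shadow(): recover the original address from a shadow address. */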
const void *kasan_shadow_to_mem(const void *shadow_addr)
{
	unsigned long addr = (unsigned long)shadow_addr;

	if (unlikely(addr > KASAN_SHADOW_END) ||
		unlikely(addr < KASAN_SHADOW_START)) {
		WARN_ON(1);
		return NULL;
	}

	if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
		return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
	else if (addr >= XKPRANGE_WC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_WC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_WC_START);
	else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
	else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
	else {
		WARN_ON(1);
		return NULL;
	}
}
/*
 * Alloc memory for shadow memory page table.
 */
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);

	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
			__func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}
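
/*
 * The kasan_*_offset() helpers below walk one level of the shadow page table,
 * installing a new lower-level table when the entry is still empty. In the
 * early stage they link in the statically allocated kasan_early_shadow_*
 * tables instead of allocating memory.
 */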
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
{
	if (__pmd_none(early, pmdp_get(pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);

		if (!early)
			memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));
		pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
	}

	return pte_offset_kernel(pmdp, addr);
}
static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
{
	if (__pud_none(early, pudp_get(pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);

		if (!early)
			memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd));
		pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys));
	}

	return pmd_offset(pudp, addr);
}
static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
{
	if (__p4d_none(early, p4dp_get(p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);

		if (!early)
			memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud));
		p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys));
	}

	return pud_offset(p4dp, addr);
}
static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early)
{
	if (__pgd_none(early, pgdp_get(pgdp))) {
		phys_addr_t p4d_phys = early ?
				__pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node);

		if (!early)
			memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d));
		pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys));
	}

	return p4d_offset(pgdp, addr);
}
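
/*
 * The kasan_*_populate() helpers fill the shadow mapping for [addr, end),
 * descending one page table level per helper and advancing only while the
 * next entry is still unpopulated (see the __*_none() helpers above).
 */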
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
					__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_zeroed_page(node);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep)));
}
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp)));
}
static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && __pud_none(early, READ_ONCE(*pudp)));
}
static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end && __p4d_none(early, READ_ONCE(*p4dp)));
}
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);

	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}
/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}
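
/*
 * Early init only sanity-checks the shadow region alignment; the actual
 * shadow mapping is built later in kasan_init().
 */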
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE));
}
static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
{
	WRITE_ONCE(*pgdp, pgdval);
}
static void __init clear_pgds(unsigned long start, unsigned long end)
{
	/*
	 * Remove references to kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it's a nop on 2- and 3-level pagetable setups.
	 */
	for (; start < end; start = pgd_addr_end(start, end))
		kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
}
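
/*
 * Main KASAN setup: temporarily switch to kasan_pg_dir, map the whole shadow
 * region to the zero page, hook up the early shadow for the vmalloc/KFENCE
 * range, allocate real shadow memory for the linear mapping and modules area,
 * then switch back to swapper_pg_dir and enable reporting.
 */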
void __init kasan_init(void)
{
	u64 i;
	phys_addr_t pa_start, pa_end;

	/*
	 * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
	 * overflow UINTPTR_MAX and then look like a user space address.
	 * For example, PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too
	 * large for Loongson-2K series whose cpu_vabits = 39.
	 */
	if (KASAN_SHADOW_END < vm_map_base) {
		pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n");
		return;
	}
	/*
	 * PGD was populated as invalid_pmd_table or invalid_pud_table
	 * in pagetable_init(), depending on how many levels of page
	 * table you are using, but we have to clear the pgd entries of the
	 * kasan shadow memory, as their values are non-zero.
	 * Otherwise pgd_none() would be false and the populate calls below
	 * would not create any new pgd at all.
	 */
	memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir));
	csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH);
	local_flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
	/* Maps everything to a single page of zeroes */
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
					kasan_mem_to_shadow((void *)KFENCE_AREA_END));

	kasan_early_stage = false;
	/* Populate the linear mapping */
	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)phys_to_virt(pa_start);
		void *end = (void *)phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
			(unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
	}
	/* Populate modules mapping */
	kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
		(unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
	/*
	 * KASAN may reuse the contents of kasan_early_shadow_pte directly, so we
	 * should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
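
	/* Switch back to swapper_pg_dir now that the shadow mapping is complete. */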
	csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
	local_flush_tlb_all();

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized.\n");
}