arch/arm/include/asm/kfence.h

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_ARM_KFENCE_H
#define __ASM_ARM_KFENCE_H

#include <linux/kfence.h>

#include <asm/pgalloc.h>
#include <asm/set_memory.h>
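
/*
 * Replace a section (PMD) mapping with a last-level page table that
 * maps the same physical range with PAGE_KERNEL permissions, so pages
 * in the KFENCE pool can later be protected/unprotected individually.
 */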
static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
{
	int i;
	unsigned long pfn = PFN_DOWN(__pa(addr));
	pte_t *pte = pte_alloc_one_kernel(&init_mm);

	if (!pte)
		return -ENOMEM;

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
	pmd_populate_kernel(&init_mm, pmd, pte);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	return 0;
}
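
/*
 * Walk the KFENCE pool page by page and split any section (PMD)
 * mappings covering it, so the pool ends up mapped with page-granular
 * PTEs that kfence_protect_page() can flip.
 */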
static inline bool arch_kfence_init_pool(void)
{
	unsigned long addr;
	pmd_t *pmd;

	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += PAGE_SIZE) {
		pmd = pmd_off_k(addr);

		if (pmd_leaf(*pmd)) {
			if (split_pmd_page(pmd, addr & PMD_MASK))
				return false;
		}
	}

	return true;
}
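
/*
 * "Protecting" a page clears its valid bit so that any access to it
 * faults; unprotecting makes the page accessible again.
 */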
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	set_memory_valid(addr, 1, !protect);

	return true;
}

#endif /* __ASM_ARM_KFENCE_H */