// SPDX-License-Identifier: GPL-2.0
/*
 * MMU-generic set_memory implementation for powerpc
 *
 * Copyright 2019-2021, IBM Corporation.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <mm/mmu_decl.h>
static pte_basic_t pte_update_delta(pte_t *ptep, unsigned long addr,
				    unsigned long old, unsigned long new)
{
	return pte_update(&init_mm, addr, ptep, old & ~new, new & ~old, 0);
}
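/*
 * Added commentary: pte_update_delta() reduces two full permission masks to
 * the minimal clear/set pair.  Bits common to 'old' and 'new' (for example
 * _PAGE_PRESENT) cancel out of both arguments, so a transition such as
 * _PAGE_KERNEL_RW -> _PAGE_KERNEL_RO touches only the bits that actually
 * differ; the exact bit layout varies by MMU family.
 */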
/*
 * Updates the attributes of a page atomically.
 *
 * This sequence is safe against concurrent updates, and also allows updating the
 * attributes of a page currently being executed or accessed.
 */
static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
	long action = (long)data;

	addr &= PAGE_MASK;
	/* modify the PTE bits as desired */
	switch (action) {
	case SET_MEMORY_RO:
		/* Don't clear DIRTY bit */
		pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_RO);
		break;
	case SET_MEMORY_ROX:
		/* Don't clear DIRTY bit */
		pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_ROX);
		break;
	case SET_MEMORY_RW:
		pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_RW);
		break;
	case SET_MEMORY_NX:
		pte_update_delta(ptep, addr, _PAGE_KERNEL_ROX, _PAGE_KERNEL_RO);
		break;
	case SET_MEMORY_X:
		pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_ROX);
		break;
	case SET_MEMORY_NP:
		pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0);
		break;
	case SET_MEMORY_P:
		pte_update(&init_mm, addr, ptep, 0, _PAGE_PRESENT, 0);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");

	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}
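/*
 * Added commentary: pte_update() performs an atomic read-modify-write of
 * the PTE, which is what lets the sequence above run safely against
 * concurrent updates without taking init_mm's page table lock.
 */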
int change_memory_attr(unsigned long addr, int numpages, long action)
{
	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
	unsigned long size = numpages * PAGE_SIZE;

	if (!numpages)
		return 0;

	if (WARN_ON_ONCE(is_vmalloc_or_module_addr((void *)addr) &&
			 is_vm_area_hugepages((void *)addr)))
		return -EINVAL;
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * On hash, the linear mapping is not in the Linux page table so
	 * apply_to_existing_page_range() will have no effect. If in the future
	 * the set_memory_* functions are used on the linear map this will need
	 * to be updated.
	 */
	if (!radix_enabled()) {
		int region = get_region_id(addr);

		if (WARN_ON_ONCE(region != VMALLOC_REGION_ID && region != IO_REGION_ID))
			return -EINVAL;
	}
#endif
	return apply_to_existing_page_range(&init_mm, start, size,
					    change_page_attr, (void *)action);
}
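/*
 * Illustrative call path (the set_memory_*() wrappers live in
 * arch/powerpc/include/asm/set_memory.h):
 *
 *	set_memory_ro(addr, numpages)
 *	  -> change_memory_attr(addr, numpages, SET_MEMORY_RO)
 *	    -> apply_to_existing_page_range(&init_mm, ...)
 *	      -> change_page_attr() once per PTE in the range
 */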
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
#ifdef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
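/*
 * Added commentary: with debug_pagealloc active, the core page allocator
 * calls __kernel_map_pages() as pages are allocated (enable = 1) and freed
 * (enable = 0), so stray accesses to free pages fault immediately.
 */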
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	int err;
	unsigned long addr = (unsigned long)page_address(page);

	if (PageHighMem(page))
		return;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
		err = hash__kernel_map_pages(page, numpages, enable);
	else if (enable)
		err = set_memory_p(addr, numpages);
	else
		err = set_memory_np(addr, numpages);
	if (err)
		panic("%s: changing memory protections failed\n", __func__);
}
#endif
#endif