/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};
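/*
 * Callback for apply_to_page_range(): clears and then sets the requested
 * attribute bits on a single PTE.
 */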
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}
/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}
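/*
 * Validate the region and delegate the PTE rewrite to
 * __change_memory_common(); see the comment in the body for why only
 * vmalloc/vmap mappings are accepted.
 */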
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	return __change_memory_common(start, size, set_mask, clear_mask);
}
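/*
 * The set_memory_* helpers below operate on whole pages and, via
 * change_memory_common(), only accept vmalloc/vmap ranges.
 */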
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}
int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}
int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);
int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
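
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller could write-protect a vmalloc'd buffer and later restore write
 * access. The function name, buffer, and page count are hypothetical.
 */
static int __maybe_unused change_memory_usage_sketch(void)
{
	int nr_pages = 4;
	void *buf = vmalloc(nr_pages * PAGE_SIZE);
	int ret;

	if (!buf)
		return -ENOMEM;

	/* buf comes from vmalloc, so change_memory_common() accepts it */
	ret = set_memory_ro((unsigned long)buf, nr_pages);
	if (!ret)
		ret = set_memory_rw((unsigned long)buf, nr_pages);

	vfree(buf);
	return ret;
}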
#ifdef CONFIG_DEBUG_PAGEALLOC
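/*
 * For DEBUG_PAGEALLOC: toggle PTE_VALID on the given pages so that stray
 * accesses to free pages fault immediately.
 */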
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (enable)
		__change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		__change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}
#ifdef CONFIG_HIBERNATION
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr = (unsigned long)page_address(page);

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return false;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return false;
	if (pud_sect(*pud))
		return true;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	if (pmd_sect(*pmd))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_valid(*pte);
}
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */