/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	/* Clear first, then set, so a bit present in both masks ends up set. */
	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	int ret;
	struct page_change_data data;
	struct vm_struct *area;
	if (!PAGE_ALIGNED(addr)) {
		/* Round down to the page boundary, but warn the caller. */
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}
	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;
	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	flush_tlb_kernel_range(start, end);
	return ret;
}
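/*
 * Illustrative summary of the mask convention: each set_memory_*
 * wrapper below picks a (set_mask, clear_mask) pair, and
 * change_page_range() rewrites every PTE in the range as, in effect:
 *
 *	pte = set_pte_bit(clear_pte_bit(pte, clear_mask), set_mask);
 *
 * e.g. set_memory_ro() sets PTE_RDONLY and clears PTE_WRITE, while
 * set_memory_rw() applies exactly the inverse pair.
 */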
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}
int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}
int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);
int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
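/*
 * Usage sketch (hypothetical module, not part of this file): exercising
 * the two exported helpers on a vmalloc'ed page. The buffer satisfies
 * what change_memory_common() checks for: it is page-aligned and lies
 * within a single VM_ALLOC area. The set_memory_* prototypes are assumed
 * to come from <asm/cacheflush.h>, where kernels of this vintage keep
 * them; the module and function names below are made up for the example.
 *
 *	#include <linux/module.h>
 *	#include <linux/vmalloc.h>
 *	#include <asm/cacheflush.h>
 *
 *	static void *page;
 *
 *	static int __init xdemo_init(void)
 *	{
 *		page = vmalloc(PAGE_SIZE);
 *		if (!page)
 *			return -ENOMEM;
 *		return set_memory_x((unsigned long)page, 1);
 *	}
 *
 *	static void __exit xdemo_exit(void)
 *	{
 *		set_memory_nx((unsigned long)page, 1);
 *		vfree(page);
 *	}
 *
 *	module_init(xdemo_init);
 *	module_exit(xdemo_exit);
 *	MODULE_LICENSE("GPL");
 */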