/* arch/arm64/mm/pageattr.c */

/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
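
/*
 * Routines for changing the attributes (permissions, validity) of kernel
 * page mappings. Everything here funnels into __change_memory_common(),
 * which applies a set/clear mask pair to every pte in the range and then
 * flushes the TLB for that range.
 */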

struct page_change_data {
        pgprot_t set_mask;
        pgprot_t clear_mask;
};
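
/*
 * Callback for apply_to_page_range(): invoked once per pte in the range,
 * it rewrites the pte with cdata->clear_mask cleared and cdata->set_mask
 * set.
 */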
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
                        void *data)
{
        struct page_change_data *cdata = data;
        pte_t pte = READ_ONCE(*ptep);

        pte = clear_pte_bit(pte, cdata->clear_mask);
        pte = set_pte_bit(pte, cdata->set_mask);

        set_pte(ptep, pte);
        return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
                                pgprot_t set_mask, pgprot_t clear_mask)
{
        struct page_change_data data;
        int ret;

        data.set_mask = set_mask;
        data.clear_mask = clear_mask;

        ret = apply_to_page_range(&init_mm, start, size, change_page_range,
                                  &data);

        flush_tlb_kernel_range(start, start + size);
        return ret;
}
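
/*
 * Validate and page-align the range, then hand it to
 * __change_memory_common(). The range must lie entirely within a single
 * vmalloc/vmap (VM_ALLOC) area; see the comment in the body for why
 * everything else is rejected.
 */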
static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
{
        unsigned long start = addr;
        unsigned long size = PAGE_SIZE * numpages;
        unsigned long end = start + size;
        struct vm_struct *area;

        if (!PAGE_ALIGNED(addr)) {
                start &= PAGE_MASK;
                end = start + size;
                WARN_ON_ONCE(1);
        }

        /*
         * Kernel VA mappings are always live, and splitting live section
         * mappings into page mappings may cause TLB conflicts. This means
         * we have to ensure that changing the permission bits of the range
         * we are operating on does not result in such splitting.
         *
         * Let's restrict ourselves to mappings created by vmalloc (or vmap).
         * Those are guaranteed to consist entirely of page mappings, and
         * splitting is never needed.
         *
         * So check whether the [addr, addr + size) interval is entirely
         * covered by precisely one VM area that has the VM_ALLOC flag set.
         */
        area = find_vm_area((void *)addr);
        if (!area ||
            end > (unsigned long)area->addr + area->size ||
            !(area->flags & VM_ALLOC))
                return -EINVAL;

        if (!numpages)
                return 0;

        return __change_memory_common(start, size, set_mask, clear_mask);
}
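
/*
 * Usage sketch (illustrative only, not part of the kernel sources): a
 * caller that wants to write-protect a buffer it obtained from vmalloc()
 * could do something like:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *
 *	set_memory_ro((unsigned long)buf, 1);	<- buf is now read-only
 *	...
 *	set_memory_rw((unsigned long)buf, 1);	<- and writable again
 *	vfree(buf);
 *
 * Addresses that do not fall inside a single VM_ALLOC area are rejected
 * with -EINVAL by change_memory_common() above.
 */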
int set_memory_ro(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_RDONLY),
                                        __pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_WRITE),
                                        __pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_PXN),
                                        __pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(0),
                                        __pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
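
/*
 * Set (enable != 0) or clear (enable == 0) PTE_VALID for numpages pages
 * starting at addr. With PTE_VALID clear, any access to the page faults;
 * the CONFIG_DEBUG_PAGEALLOC code below relies on this to catch accesses
 * to freed pages.
 */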
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
        if (enable)
                return __change_memory_common(addr, PAGE_SIZE * numpages,
                                        __pgprot(PTE_VALID),
                                        __pgprot(0));
        else
                return __change_memory_common(addr, PAGE_SIZE * numpages,
                                        __pgprot(0),
                                        __pgprot(PTE_VALID));
}

#ifdef CONFIG_DEBUG_PAGEALLOC
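/*
 * Arch hook for debug_pagealloc: map (enable != 0) or unmap (enable == 0)
 * the linear-map alias of the given pages by toggling PTE_VALID.
 */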
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#ifdef CONFIG_HIBERNATION
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
        pgd_t *pgdp;
        pud_t *pudp, pud;
        pmd_t *pmdp, pmd;
        pte_t *ptep;
        unsigned long addr = (unsigned long)page_address(page);

        pgdp = pgd_offset_k(addr);
        if (pgd_none(READ_ONCE(*pgdp)))
                return false;

        pudp = pud_offset(pgdp, addr);
        pud = READ_ONCE(*pudp);
        if (pud_none(pud))
                return false;
        if (pud_sect(pud))
                return true;

        pmdp = pmd_offset(pudp, addr);
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return false;
        if (pmd_sect(pmd))
                return true;

        ptep = pte_offset_kernel(pmdp, addr);
        return pte_valid(READ_ONCE(*ptep));
}
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */