/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}
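
/*
 * Walk the ptes covered by one pmd in [addr, end), applying newprot to each
 * present entry and downgrading writable migration entries to read-only.
 * For prot_numa updates, zero/KSM pages and already-PROT_NONE ptes are
 * skipped. Returns the number of entries that were updated.
 */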
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}
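
/*
 * Walk the pmds covered by one pud, splitting or updating transparent huge
 * pmds and descending into change_pte_range() for the rest. MMU notifiers
 * are invoked lazily, the first time a populated pmd is found in the range.
 */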
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}
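
/*
 * Walk the puds covered by one pgd, skipping empty entries and accumulating
 * the count of updated ptes from change_pmd_range().
 */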
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}
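
/*
 * Top-level page table walk for a non-hugetlb vma: flush caches up front,
 * mark a TLB flush as pending on the mm, walk the pgds, and only flush the
 * TLB at the end if any entries were actually changed.
 */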
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}
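
/*
 * Change the protection of [start, end) within a vma to newprot. Hugetlb
 * vmas are handled by hugetlb_change_protection(); everything else goes
 * through the generic page table walk. Returns the number of entries
 * updated.
 */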
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}
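
/*
 * Apply newflags to the [start, end) slice of a vma: charge memory for a
 * newly writable private mapping, merge with neighbours or split the vma as
 * needed, then update vm_flags, vm_page_prot and the page tables.
 */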
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mapping were accounted for
	 * even if read-only so there is no need to account for them here
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}
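
/*
 * mprotect(2): validate the arguments, translate prot to vm_flags, then walk
 * the vmas covering [start, start + len) and apply mprotect_fixup() to each.
 */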
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}