/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/rbtree_augmented.h>
#include <linux/sched/sysctl.h>
#include <linux/notifier.h>
#include <linux/memory.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_E2K
#include <asm/process.h>
#endif

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifndef arch_rebalance_pgtables
#define arch_rebalance_pgtables(addr, len)		(addr)
#endif

static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end);

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));
}
EXPORT_SYMBOL(vm_get_page_prot);

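/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * the low four VM_* bits form the protection_map index, so a private
 * read/write mapping resolves as follows.
 *
 *	unsigned long flags = VM_READ | VM_WRITE;  // MAP_PRIVATE, PROT_READ|PROT_WRITE
 *	pgprot_t prot = vm_get_page_prot(flags);   // == protection_map[0x3], i.e. __P011
 *
 * A MAP_SHARED mapping additionally sets VM_SHARED and so selects the
 * __S half of the table instead.
 */
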
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
int sysctl_overcommit_ratio __read_mostly = 50;	/* default is 50% */
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

/*
 * Make sure vm_committed_as sits in its own cacheline and is not shared
 * with other variables, as it can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

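/*
 * Worked example (editor's addition, assuming 4K pages, PAGE_SHIFT == 12):
 * with sysctl_admin_reserve_kbytes == 8192, the root reserve above is
 * 8192 >> (12 - 10) == 2048 pages (8MB), so a non-root caller in
 * OVERCOMMIT_GUESS mode succeeds only if free - 2048 > pages.
 */
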
/*
 * Requires inode->i_mapping->i_mmap_mutex
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
		struct file *file, struct address_space *mapping)
{
	if (vma->vm_flags & VM_DENYWRITE)
		atomic_inc(&file_inode(file)->i_writecount);
	if (vma->vm_flags & VM_SHARED)
		mapping->i_mmap_writable--;

	flush_dcache_mmap_lock(mapping);
	if (unlikely(vma->vm_flags & VM_NONLINEAR))
		list_del_init(&vma->shared.nonlinear);
	else
		vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;
		mutex_lock(&mapping->i_mmap_mutex);
		__remove_shared_vm_struct(vma, file, mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}
}

/*
 * Close a vm structure and free it, returning the next.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *next = vma->vm_next;

	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	kmem_cache_free(vm_area_cachep, vma);
	return next;
}

static unsigned long do_brk(unsigned long addr, unsigned long len);

SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;
	unsigned long min_brk;
	bool populate;

	down_write(&mm->mmap_sem);

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the
	 * test of oldbrk with newbrk then it can escape the test and let the
	 * data segment grow beyond its set limit in the case where the limit
	 * is not page aligned -Ram Gupta
	 */
	rlim = rlimit(RLIMIT_DATA);
	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
			(mm->end_data - mm->start_data) > rlim)
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;

set_brk:
	mm->brk = brk;
	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}

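/*
 * Usage sketch (editor's addition): from userspace this path is reached
 * via the brk(2)/sbrk(3) wrappers, e.g.
 *
 *	void *cur = sbrk(0);		// query the current program break
 *	brk((char *)cur + 4096);	// grow the heap by one page
 *
 * A shrinking request unmaps [newbrk, oldbrk); a growing one extends
 * the anonymous brk area via do_brk() above.
 */
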
static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
	unsigned long max, subtree_gap;
	max = vma->vm_start;
	if (vma->vm_prev)
		max -= vma->vm_prev->vm_end;
	if (vma->vm_rb.rb_left) {
		subtree_gap = rb_entry(vma->vm_rb.rb_left,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	if (vma->vm_rb.rb_right) {
		subtree_gap = rb_entry(vma->vm_rb.rb_right,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	return max;
}

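/*
 * Illustrative example (editor's addition): for a vma at [0x6000, 0x7000)
 * whose predecessor ends at 0x4000, the gap in front of the vma is
 * 0x6000 - 0x4000 == 0x2000. rb_subtree_gap then takes the maximum of
 * that value and the cached gaps of the left and right rbtree subtrees,
 * so every node summarises the largest free gap anywhere below it and
 * the gap search can prune whole subtrees.
 */
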
#ifdef CONFIG_DEBUG_VM_RB
static int browse_rb(struct rb_root *root)
{
	int i = 0, j, bug = 0;
	struct rb_node *nd, *pn = NULL;
	unsigned long prev = 0, pend = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		if (vma->vm_start < prev) {
			printk("vm_start %lx prev %lx\n", vma->vm_start, prev);
			bug = 1;
		}
		if (vma->vm_start < pend) {
			printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
			bug = 1;
		}
		if (vma->vm_start > vma->vm_end) {
			printk("vm_end %lx < vm_start %lx\n",
				vma->vm_end, vma->vm_start);
			bug = 1;
		}
		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
			printk("free gap %lx, correct %lx\n",
			       vma->rb_subtree_gap,
			       vma_compute_subtree_gap(vma));
			bug = 1;
		}
		i++;
		pn = nd;
		prev = vma->vm_start;
		pend = vma->vm_end;
	}
	j = 0;
	for (nd = pn; nd; nd = rb_prev(nd))
		j++;
	if (i != j) {
		printk("backwards %d, forwards %d\n", j, i);
		bug = 1;
	}
	return bug ? -1 : i;
}

static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
{
	struct rb_node *nd;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		BUG_ON(vma != ignore &&
		       vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
	}
}

void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	unsigned long highest_address = 0;
	struct vm_area_struct *vma = mm->mmap;
	while (vma) {
		struct anon_vma_chain *avc;
		vma_lock_anon_vma(vma);
		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
			anon_vma_interval_tree_verify(avc);
		vma_unlock_anon_vma(vma);
		highest_address = vma->vm_end;
		vma = vma->vm_next;
		i++;
	}
	if (i != mm->map_count) {
		printk("map_count %d vm_next %d\n", mm->map_count, i);
		bug = 1;
	}
	if (highest_address != mm->highest_vm_end) {
		printk("mm->highest_vm_end %lx, found %lx\n",
		       mm->highest_vm_end, highest_address);
		bug = 1;
	}
	i = browse_rb(&mm->mm_rb);
	if (i != mm->map_count) {
		printk("map_count %d rb %d\n", mm->map_count, i);
		bug = 1;
	}
	BUG_ON(bug);
}
#else
#define validate_mm_rb(root, ignore) do { } while (0)
#define validate_mm(mm) do { } while (0)
#endif

RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
		     unsigned long, rb_subtree_gap, vma_compute_subtree_gap)

/*
 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
 * vma->vm_prev->vm_end values changed, without modifying the vma's position
 * in the rbtree.
 */
static void vma_gap_update(struct vm_area_struct *vma)
{
	/*
	 * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
	 * function that does exactly what we want.
	 */
	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}

static inline void vma_rb_insert(struct vm_area_struct *vma,
				 struct rb_root *root)
{
	/* All rb_subtree_gap values must be consistent prior to insertion */
	validate_mm_rb(root, NULL);

	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}

static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
{
	/*
	 * All rb_subtree_gap values must be consistent prior to erase,
	 * with the possible exception of the vma being erased.
	 */
	validate_mm_rb(root, vma);

	/*
	 * Note rb_erase_augmented is a fairly large inline function,
	 * so make sure we instantiate it only once with our desired
	 * augmented rbtree callbacks.
	 */
	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_sem and by
 * the root anon_vma's mutex.
 */
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

static int find_vma_links(struct mm_struct *mm, unsigned long addr,
		unsigned long end, struct vm_area_struct **pprev,
		struct rb_node ***rb_link, struct rb_node **rb_parent)
{
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	__rb_link = &mm->mm_rb.rb_node;
	rb_prev = __rb_parent = NULL;

	while (*__rb_link) {
		struct vm_area_struct *vma_tmp;

		__rb_parent = *__rb_link;
		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

		if (vma_tmp->vm_end > addr) {
			/* Fail if an existing vma overlaps the area */
			if (vma_tmp->vm_start < end)
				return -ENOMEM;
			__rb_link = &__rb_parent->rb_left;
		} else {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		}
	}

	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	return 0;
}

static unsigned long count_vma_pages_range(struct mm_struct *mm,
		unsigned long addr, unsigned long end)
{
	unsigned long nr_pages = 0;
	struct vm_area_struct *vma;

	/* Find first overlapping mapping */
	vma = find_vma_intersection(mm, addr, end);
	if (!vma)
		return 0;

	nr_pages = (min(end, vma->vm_end) -
		max(addr, vma->vm_start)) >> PAGE_SHIFT;

	/* Iterate over the rest of the overlaps */
	for (vma = vma->vm_next; vma; vma = vma->vm_next) {
		unsigned long overlap_len;

		if (vma->vm_start > end)
			break;

		overlap_len = min(end, vma->vm_end) - vma->vm_start;
		nr_pages += overlap_len >> PAGE_SHIFT;
	}

	return nr_pages;
}

void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
		struct rb_node **rb_link, struct rb_node *rb_parent)
{
	/* Update tracking information for the gap following the new vma. */
	if (vma->vm_next)
		vma_gap_update(vma->vm_next);
	else
		mm->highest_vm_end = vma->vm_end;

	/*
	 * vma->vm_prev wasn't known when we followed the rbtree to find the
	 * correct insertion point for that vma. As a result, we could not
	 * update the vma vm_rb parents rb_subtree_gap values on the way down.
	 * So, we first insert the vma with a zero rb_subtree_gap value
	 * (to be consistent with what we did on the way down), and then
	 * immediately update the gap to the correct value. Finally we
	 * rebalance the rbtree after all augmented values have been set.
	 */
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	vma->rb_subtree_gap = 0;
	vma_gap_update(vma);
	vma_rb_insert(vma, &mm->mm_rb);
}

static void __vma_link_file(struct vm_area_struct *vma)
{
	struct file *file;

	file = vma->vm_file;
	if (file) {
		struct address_space *mapping = file->f_mapping;

		if (vma->vm_flags & VM_DENYWRITE)
			atomic_dec(&file_inode(file)->i_writecount);
		if (vma->vm_flags & VM_SHARED)
			mapping->i_mmap_writable++;

		flush_dcache_mmap_lock(mapping);
		if (unlikely(vma->vm_flags & VM_NONLINEAR))
			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		else
			vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}
}

static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, struct rb_node **rb_link,
	struct rb_node *rb_parent)
{
	__vma_link_list(mm, vma, prev, rb_parent);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
}

static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
			struct vm_area_struct *prev, struct rb_node **rb_link,
			struct rb_node *rb_parent)
{
	struct address_space *mapping = NULL;

	if (vma->vm_file)
		mapping = vma->vm_file->f_mapping;

	if (mapping)
		mutex_lock(&mapping->i_mmap_mutex);

	__vma_link(mm, vma, prev, rb_link, rb_parent);
	__vma_link_file(vma);

	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);

	mm->map_count++;
	validate_mm(mm);
}

/*
 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
 * mm's list and rbtree. It has already been inserted into the interval tree.
 */
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		BUG();
	__vma_link(mm, vma, prev, rb_link, rb_parent);
	mm->map_count++;
}

static inline void
__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma_rb_erase(vma, &mm->mm_rb);
	prev->vm_next = next = vma->vm_next;
	if (next)
		next->vm_prev = prev;

	/* Kill the cache */
	vmacache_invalidate(mm);
}

/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary. The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 */
int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next = vma->vm_next;
	struct vm_area_struct *importer = NULL;
	struct address_space *mapping = NULL;
	struct rb_root *root = NULL;
	struct anon_vma *anon_vma = NULL;
	struct file *file = vma->vm_file;
	bool start_changed = false, end_changed = false;
	long adjust_next = 0;
	int remove_next = 0;

	if (next && !insert) {
		struct vm_area_struct *exporter = NULL;

		if (end >= next->vm_end) {
			/*
			 * vma expands, overlapping all the next, and
			 * perhaps the one after too (mprotect case 6).
			 */
again:			remove_next = 1 + (end > next->vm_end);
			end = next->vm_end;
			exporter = next;
			importer = vma;
		} else if (end > next->vm_start) {
			/*
			 * vma expands, overlapping part of the next:
			 * mprotect case 5 shifting the boundary up.
			 */
			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
			exporter = next;
			importer = vma;
		} else if (end < vma->vm_end) {
			/*
			 * vma shrinks, and !insert tells it's not
			 * split_vma inserting another: so it must be
			 * mprotect case 4 shifting the boundary down.
			 */
			adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
			exporter = vma;
			importer = next;
		}

		/*
		 * Easily overlooked: when mprotect shifts the boundary,
		 * make sure the expanding vma has anon_vma set if the
		 * shrinking vma had, to cover any anon pages imported.
		 */
		if (exporter && exporter->anon_vma && !importer->anon_vma) {
			int error;

			error = anon_vma_clone(importer, exporter);
			if (error)
				return error;
			importer->anon_vma = exporter->anon_vma;
		}
	}

	if (file) {
		mapping = file->f_mapping;
		if (!(vma->vm_flags & VM_NONLINEAR)) {
			root = &mapping->i_mmap;
			uprobe_munmap(vma, vma->vm_start, vma->vm_end);

			if (adjust_next)
				uprobe_munmap(next, next->vm_start,
							next->vm_end);
		}

		mutex_lock(&mapping->i_mmap_mutex);
		if (insert) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(insert);
		}
	}

	vma_adjust_trans_huge(vma, start, end, adjust_next);

	anon_vma = vma->anon_vma;
	if (!anon_vma && adjust_next)
		anon_vma = next->anon_vma;
	if (anon_vma) {
		VM_BUG_ON(adjust_next && next->anon_vma &&
			  anon_vma != next->anon_vma);
		anon_vma_lock_write(anon_vma);
		anon_vma_interval_tree_pre_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_pre_update_vma(next);
	}

	if (root) {
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, root);
		if (adjust_next)
			vma_interval_tree_remove(next, root);
	}

	if (start != vma->vm_start) {
		vma->vm_start = start;
		start_changed = true;
	}
	if (end != vma->vm_end) {
		vma->vm_end = end;
		end_changed = true;
	}
	vma->vm_pgoff = pgoff;
	if (adjust_next) {
		next->vm_start += adjust_next << PAGE_SHIFT;
		next->vm_pgoff += adjust_next;
	}

	if (root) {
		if (adjust_next)
			vma_interval_tree_insert(next, root);
		vma_interval_tree_insert(vma, root);
		flush_dcache_mmap_unlock(mapping);
	}

	if (remove_next) {
		/*
		 * vma_merge has merged next into vma, and needs
		 * us to remove next before dropping the locks.
		 */
		__vma_unlink(mm, next, vma);
		if (file)
			__remove_shared_vm_struct(next, file, mapping);
	} else if (insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		__insert_vm_struct(mm, insert);
	} else {
		if (start_changed)
			vma_gap_update(vma);
		if (end_changed) {
			if (!next)
				mm->highest_vm_end = end;
			else if (!adjust_next)
				vma_gap_update(next);
		}
	}

	if (anon_vma) {
		anon_vma_interval_tree_post_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_post_update_vma(next);
		anon_vma_unlock_write(anon_vma);
	}
	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);

	if (root) {
		uprobe_mmap(vma);

		if (adjust_next)
			uprobe_mmap(next);
	}

	if (remove_next) {
		if (file) {
			uprobe_munmap(next, next->vm_start, next->vm_end);
			fput(file);
		}
		if (next->anon_vma)
			anon_vma_merge(vma, next);
		mm->map_count--;
		mpol_put(vma_policy(next));
		kmem_cache_free(vm_area_cachep, next);
		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we must remove another next too. It would clutter
		 * up the code too much to do both in one go.
		 */
		next = vma->vm_next;
		if (remove_next == 2)
			goto again;
		else if (next)
			vma_gap_update(next);
		else
			mm->highest_vm_end = end;
	}
	if (insert && file)
		uprobe_mmap(insert);

	validate_mm(mm);

	return 0;
}

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those.
 */
static inline int is_mergeable_vma(struct vm_area_struct *vma,
			struct file *file, unsigned long vm_flags)
{
	/*
	 * VM_SOFTDIRTY should not prevent VMA merging if we match the
	 * flags except for the dirty bit -- the caller should mark the
	 * merged VMA as dirty. If the dirty bit were not excluded from
	 * the comparison, we would increase pressure on the memory system,
	 * forcing the kernel to generate new VMAs where an old one could
	 * be extended instead.
	 */
	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
		return 0;
	if (vma->vm_file != file)
		return 0;
	if (vma->vm_ops && vma->vm_ops->close)
		return 0;
	return 1;
}

static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
					struct anon_vma *anon_vma2,
					struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMAs cloned
	 * from parents; this avoids the scalability problems caused by
	 * the anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
		list_is_singular(&vma->anon_vma_chain)))
		return 1;
	return anon_vma1 == anon_vma2;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 */
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
	if (is_mergeable_vma(vma, file, vm_flags) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		if (vma->vm_pgoff == vm_pgoff)
			return 1;
	}
	return 0;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 */
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
	if (is_mergeable_vma(vma, file, vm_flags) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		pgoff_t vm_pglen;
		vm_pglen = vma_pages(vma);
		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
			return 1;
	}
	return 0;
}

/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 * whether that can be merged with its predecessor or its successor.
 * Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where AAAA is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
 *
 *     AAAA             AAAA                AAAA          AAAA
 *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
 *    cannot merge    might become    might become    might become
 *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
 *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
 *    mremap move:                                    PPPPNNNNNNNN 8
 *        AAAA
 *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
 *    might become    case 1 below    case 2 below    case 3 below
 *
 * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
 * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy)
{
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	struct vm_area_struct *area, *next;
	int err;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	if (prev)
		next = prev->vm_next;
	else
		next = mm->mmap;
	area = next;
	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
		next = next->vm_next;

	/*
	 * Can it merge with the predecessor?
	 */
	if (prev && prev->vm_end == addr &&
			mpol_equal(vma_policy(prev), policy) &&
			can_vma_merge_after(prev, vm_flags,
						anon_vma, file, pgoff)) {
		/*
		 * OK, it can.  Can we now merge in the successor as well?
		 */
		if (next && end == next->vm_start &&
				mpol_equal(policy, vma_policy(next)) &&
				can_vma_merge_before(next, vm_flags,
					anon_vma, file, pgoff+pglen) &&
				is_mergeable_anon_vma(prev->anon_vma,
						      next->anon_vma, NULL)) {
							/* cases 1, 6 */
			err = vma_adjust(prev, prev->vm_start,
				next->vm_end, prev->vm_pgoff, NULL);
		} else					/* cases 2, 5, 7 */
			err = vma_adjust(prev, prev->vm_start,
				end, prev->vm_pgoff, NULL);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(prev);
		return prev;
	}

	/*
	 * Can this new request be merged in front of next?
	 */
	if (next && end == next->vm_start &&
			mpol_equal(policy, vma_policy(next)) &&
			can_vma_merge_before(next, vm_flags,
					anon_vma, file, pgoff+pglen)) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			err = vma_adjust(prev, prev->vm_start,
				addr, prev->vm_pgoff, NULL);
		else					/* cases 3, 8 */
			err = vma_adjust(area, addr, next->vm_end,
				next->vm_pgoff - pglen, NULL);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(area);
		return area;
	}

	return NULL;
}

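/*
 * Example (editor's addition): an mprotect() that makes [addr, end)
 * identical in flags to an adjacent prev ending exactly at addr takes
 * the can_vma_merge_after() path above; if next also starts exactly at
 * end with the same flags and a compatible anon_vma, prev is simply
 * extended over both (cases 1/6) and next is unlinked and freed inside
 * vma_adjust().
 */
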
/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mm_sem held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mm_sem.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *near;

	near = vma->vm_next;
	if (!near)
		goto try_prev;

	anon_vma = reusable_anon_vma(near, vma, near);
	if (anon_vma)
		return anon_vma;
try_prev:
	near = vma->vm_prev;
	if (!near)
		goto none;

	anon_vma = reusable_anon_vma(near, near, vma);
	if (anon_vma)
		return anon_vma;
none:
	/*
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return NULL;
}

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
						struct file *file, long pages)
{
	const unsigned long stack_flags
		= VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);

	mm->total_vm += pages;

	if (file) {
		mm->shared_vm += pages;
		if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
			mm->exec_vm += pages;
	} else if (flags & stack_flags)
		mm->stack_vm += pages;
}
#endif /* CONFIG_PROC_FS */

/*
 * If a hint addr is less than mmap_min_addr change hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (((void *)hint != NULL) &&
	    (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}

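/*
 * Worked example (editor's addition, assuming 4K pages and
 * mmap_min_addr == 65536): a hint of 0x3000 is below the floor and is
 * rounded up to PAGE_ALIGN(65536) == 0x10000, while a NULL hint is
 * left alone so the allocator may choose any address.
 */
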
static inline int mlock_future_check(struct mm_struct *mm,
				     unsigned long flags,
				     unsigned long len)
{
	unsigned long locked, lock_limit;

	/*  mlock MCL_FUTURE? */
	if (flags & VM_LOCKED) {
		locked = len >> PAGE_SHIFT;
		locked += mm->locked_vm;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		lock_limit >>= PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return -EAGAIN;
	}
	return 0;
}

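/*
 * Example (editor's sketch, 4K pages): with RLIMIT_MEMLOCK at 64KB the
 * limit is 65536 >> 12 == 16 pages; a VM_LOCKED request for 32KB
 * (8 pages) on top of locked_vm == 10 gives locked == 18 > 16, so a
 * task without CAP_IPC_LOCK gets -EAGAIN.
 */
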
/*
 * The caller must hold down_write(&current->mm->mmap_sem).
 */

unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, unsigned long pgoff,
			unsigned long *populate)
{
	struct mm_struct *mm = current->mm;
	vm_flags_t vm_flags;

	*populate = 0;

#ifdef CONFIG_MCST_RT
	if (mm->extra_vm_flags & VM_MLOCK_DONE) {
		/*
		 * This is an RT task which has done mlockall();
		 * a new mmap() is not allowed.
		 */
		return -EFAULT;
	}
#endif

	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 *
	 * (the exception is when the underlying filesystem is noexec
	 *  mounted, in which case we don't add PROT_EXEC.)
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
			prot |= PROT_EXEC;

	if (!len)
		return -EINVAL;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/* Obtain the address to map to. We verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (addr & ~PAGE_MASK)
		return addr;

	/* Do simple checking here so the lower-level routines won't have
	 * to. We assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	if (flags & MAP_LOCKED)
		if (!can_do_mlock())
			return -EPERM;

	if (mlock_future_check(mm, vm_flags, len))
		return -EAGAIN;

	if (file) {
		struct inode *inode = file_inode(file);

		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure there are no mandatory locks on the file.
			 */
			if (locks_verify_locked(inode))
				return -EAGAIN;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

			/* fall through */
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}

			if (!file->f_op->mmap)
				return -ENODEV;
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			/*
			 * Ignore pgoff.
			 */
			pgoff = 0;
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Set 'VM_NORESERVE' if we should not account for the
	 * memory use of this mapping.
	 */
	if (flags & MAP_NORESERVE) {
		/* We honor MAP_NORESERVE if allowed to overcommit */
		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			vm_flags |= VM_NORESERVE;

		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
		if (file && is_file_hugepages(file))
			vm_flags |= VM_NORESERVE;
	}

	addr = mmap_region(file, addr, len, vm_flags, pgoff);
	if (!IS_ERR_VALUE(addr) &&
	    ((vm_flags & VM_LOCKED) ||
	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
		*populate = len;
	return addr;
}

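/*
 * Usage sketch (editor's addition): a typical userspace request that
 * reaches this function via mmap(2),
 *
 *	void *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * takes the anonymous MAP_PRIVATE branch above (pgoff derived from
 * addr); with MAP_POPULATE added it would also set *populate so the
 * caller pre-faults the range once mmap_region() succeeds.
 */
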
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	if (!(flags & MAP_ANONYMOUS)) {
		audit_mmap_fd(fd, flags);
		file = fget(fd);
		if (!file)
			goto out;
		if (is_file_hugepages(file))
			len = ALIGN(len, huge_page_size(hstate_file(file)));
		retval = -EINVAL;
		if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file)))
			goto out_fput;
	} else if (flags & MAP_HUGETLB) {
		struct user_struct *user = NULL;
		struct hstate *hs;

		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs)
			return -EINVAL;

		len = ALIGN(len, huge_page_size(hs));
		/*
		 * VM_NORESERVE is used because the reservations will be
		 * taken when vm_ops->mmap() is called
		 * A dummy user value is used because we are not locking
		 * memory so no accounting is necessary
		 */
		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
				VM_NORESERVE,
				&user, HUGETLB_ANONHUGE_INODE,
				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (IS_ERR(file))
			return PTR_ERR(file);
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
	if (file)
		fput(file);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
int vma_wants_writenotify(struct vm_area_struct *vma)
{
	vm_flags_t vm_flags = vma->vm_flags;

	/* If it was private or non-writable, the write bit is already clear */
	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
		return 0;

	/* The backer wishes to know when pages are first written to? */
	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
		return 1;

	/* The open routine did something to the protections already? */
	if (pgprot_val(vma->vm_page_prot) !=
	    pgprot_val(vm_get_page_prot(vm_flags)))
		return 0;

	/* Specialty mapping? */
	if (vm_flags & VM_PFNMAP)
		return 0;

	/* Can the mapping track the dirty pages? */
	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_cap_account_dirty(vma->vm_file->f_mapping);
}

/*
 * We account for memory if it's a private writeable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return 0;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	int error;
	struct rb_node **rb_link, *rb_parent;
	unsigned long charged = 0;

	/* Check against address space limit. */
	if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
		unsigned long nr_pages;

		/*
		 * MAP_FIXED may remove pages of mappings that intersect with
		 * the requested mapping. Account for the pages it would unmap.
		 */
		if (!(vm_flags & MAP_FIXED))
			return -ENOMEM;

		nr_pages = count_vma_pages_range(mm, addr, addr + len);

		if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages))
			return -ENOMEM;
	}

	/* Clear old maps */
	error = -ENOMEM;
munmap_back:
	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
		if (do_munmap(mm, addr, len))
			return -ENOMEM;
		goto munmap_back;
	}

	/*
	 * Private writable mapping: check memory availability
	 */
	if (accountable_mapping(file, vm_flags)) {
		charged = len >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return -ENOMEM;
		vm_flags |= VM_ACCOUNT;
	}

	/*
	 * Can we just expand an old mapping?
	 */
	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
	if (vma)
		goto out;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. The address has already been validated, but
	 * not unmapped; the old maps have been removed from the list.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		error = -ENOMEM;
		goto unacct_error;
	}

	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
	vma->vm_pgoff = pgoff;
	INIT_LIST_HEAD(&vma->anon_vma_chain);

	if (file) {
		if (vm_flags & VM_DENYWRITE) {
			error = deny_write_access(file);
			if (error)
				goto free_vma;
		}
		vma->vm_file = get_file(file);
		error = file->f_op->mmap(file, vma);
		if (error)
			goto unmap_and_free_vma;

		/* Can addr have changed??
		 *
		 * Answer: Yes, several device drivers can do it in their
		 *         f_op->mmap method. -DaveM
		 * Bug: If addr is changed, prev, rb_link, rb_parent should
		 *      be updated for vma_link()
		 */
		WARN_ON_ONCE(addr != vma->vm_start);

		addr = vma->vm_start;
		vm_flags = vma->vm_flags;
	} else if (vm_flags & VM_SHARED) {
		error = shmem_zero_setup(vma);
		if (error)
			goto free_vma;
	}

	if (vma_wants_writenotify(vma)) {
		pgprot_t pprot = vma->vm_page_prot;

		/* Can vma->vm_page_prot have changed??
		 *
		 * Answer: Yes, drivers may have changed it in their
		 *         f_op->mmap method.
		 *
		 * Ensures that vmas marked as uncached stay that way.
		 */
		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
		if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	}

	vma_link(mm, vma, prev, rb_link, rb_parent);
	/* Once vma denies write, undo our temporary denial count */
	if (vm_flags & VM_DENYWRITE)
		allow_write_access(file);
	file = vma->vm_file;
out:
	perf_event_mmap(vma);

	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
	if (vm_flags & VM_LOCKED) {
		if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
					vma == get_gate_vma(current->mm)))
			mm->locked_vm += (len >> PAGE_SHIFT);
		else
			vma->vm_flags &= ~VM_LOCKED;
	}

	if (file)
		uprobe_mmap(vma);

#if defined(CONFIG_E2K) && defined(CONFIG_MAKE_ALL_PAGES_VALID)
	if (vm_flags & VM_PAGESVALID) {
		int ret = make_vma_pages_valid(vma, addr, addr + len);

		if (ret) {
			do_munmap(mm, addr, len);
			return ret;
		}
	}
#endif

	/*
	 * A new (or expanded) vma always gets the soft-dirty status.
	 * Otherwise the user-space soft-dirty page tracker would not
	 * be able to distinguish the case when a vma area was unmapped
	 * and then a new one mapped in place (which must be treated as
	 * a completely new data area).
	 */
	vma->vm_flags |= VM_SOFTDIRTY;

	return addr;

unmap_and_free_vma:
	if (vm_flags & VM_DENYWRITE)
		allow_write_access(file);
	vma->vm_file = NULL;
	fput(file);

	/* Undo any partial mapping done by a device driver. */
	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
	charged = 0;
free_vma:
	kmem_cache_free(vm_area_cachep, vma);
unacct_error:
	if (charged)
		vm_unacct_memory(charged);
	return error;
}

unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	/*
	 * We implement the search by looking for an rbtree node that
	 * immediately follows a suitable gap. That is,
	 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
	 * - gap_end   = vma->vm_start        >= info->low_limit  + length;
	 * - gap_end - gap_start >= length
	 */

	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	/* Adjust search limits by the desired length */
	if (info->high_limit < length)
		return -ENOMEM;
	high_limit = info->high_limit - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		goto check_highest;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		goto check_highest;

	while (true) {
		/* Visit left subtree if it looks promising */
		gap_end = vma->vm_start;
		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
check_current:
		/* Check if current node has a suitable gap */
		if (gap_start > high_limit)
			return -ENOMEM;
		if (gap_end >= low_limit && gap_end - gap_start >= length)
			goto found;

		/* Visit right subtree if it looks promising */
		if (vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;
			if (!rb_parent(prev))
				goto check_highest;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_left) {
				gap_start = vma->vm_prev->vm_end;
				gap_end = vma->vm_start;
				goto check_current;
			}
		}
	}

check_highest:
	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	gap_end = ULONG_MAX;  /* Only for VM_BUG_ON below */
	if (gap_start > high_limit)
		return -ENOMEM;

found:
	/* We found a suitable gap. Clip it with the original low_limit. */
	if (gap_start < info->low_limit)
		gap_start = info->low_limit;

	/* Adjust gap address to the desired alignment */
	gap_start += (info->align_offset - gap_start) & info->align_mask;

	VM_BUG_ON(gap_start + info->length > info->high_limit);
	VM_BUG_ON(gap_start + info->length > gap_end);
	return gap_start;
}

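/*
 * Worked example (editor's addition): with align_mask == 0xfff (4K
 * alignment) and align_offset == 0, a raw gap_start of 0x10123 is
 * advanced by (0 - 0x10123) & 0xfff == 0xedd to 0x11000, the next
 * aligned address; searching with length + align_mask above guarantees
 * that the aligned start still fits inside the gap.
 */
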
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	/*
	 * Adjust search limits by the desired length.
	 * See implementation comment at top of unmapped_area().
	 */
	gap_end = info->high_limit;
	if (gap_end < length)
		return -ENOMEM;
	high_limit = gap_end - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	if (gap_start <= high_limit)
		goto found_highest;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		return -ENOMEM;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		return -ENOMEM;

	while (true) {
		/* Visit right subtree if it looks promising */
		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

check_current:
		/* Check if current node has a suitable gap */
		gap_end = vma->vm_start;
		if (gap_end < low_limit)
			return -ENOMEM;
		if (gap_start <= high_limit && gap_end - gap_start >= length)
			goto found;

		/* Visit left subtree if it looks promising */
		if (vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;
			if (!rb_parent(prev))
				return -ENOMEM;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_right) {
				gap_start = vma->vm_prev ?
					vma->vm_prev->vm_end : 0;
				goto check_current;
			}
		}
	}

found:
	/* We found a suitable gap. Clip it with the original high_limit. */
	if (gap_end > info->high_limit)
		gap_end = info->high_limit;

found_highest:
	/* Compute highest gap address at the desired alignment */
	gap_end -= info->length;
	gap_end -= (gap_end - info->align_offset) & info->align_mask;

	VM_BUG_ON(gap_end < info->low_limit);
	VM_BUG_ON(gap_end < gap_start);
	return gap_end;
}

/* Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * ie
 *	if (ret & ~PAGE_MASK)
 *		error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;
	return vm_unmapped_area(&info);
}
#endif

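/*
 * Sketch (editor's addition, hypothetical values): an architecture that
 * needs colour-aligned shared mappings would fill the same structure
 * with an alignment request before calling vm_unmapped_area(), e.g.
 *
 *	info.align_mask = SHMLBA - 1;			// assumed alignment
 *	info.align_offset = pgoff << PAGE_SHIFT;	// keep cache colour
 *
 * much as several architectures do in their own
 * arch_get_unmapped_area() variants.
 */
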
/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	info.align_mask = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
#endif

unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	unsigned long error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/* Careful about overflows.. */
	if (len > TASK_SIZE)
		return -ENOMEM;

	get_area = current->mm->get_unmapped_area;
	if (file && file->f_op->get_unmapped_area)
		get_area = file->f_op->get_unmapped_area;
	addr = get_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	if (addr & ~PAGE_MASK)
		return -EINVAL;

	addr = arch_rebalance_pgtables(addr, len);
	error = security_mmap_addr(addr);
	return error ? error : addr;
}

EXPORT_SYMBOL(get_unmapped_area);

2016 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
2017 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
2019 struct rb_node *rb_node;
2020 struct vm_area_struct *vma;
2022 /* Check the cache first. */
2023 vma = vmacache_find(mm, addr);
2024 if (likely(vma))
2025 return vma;
2027 rb_node = mm->mm_rb.rb_node;
2028 vma = NULL;
2030 while (rb_node) {
2031 struct vm_area_struct *tmp;
2033 tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
2035 if (tmp->vm_end > addr) {
2036 vma = tmp;
2037 if (tmp->vm_start <= addr)
2038 break;
2039 rb_node = rb_node->rb_left;
2040 } else
2041 rb_node = rb_node->rb_right;
2044 if (vma)
2045 vmacache_update(addr, vma);
2046 return vma;
2049 EXPORT_SYMBOL(find_vma);
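/*
 * Caller sketch (illustrative only): find_vma() returns the first vma
 * ending above addr, which may still *start* above addr. Callers that
 * need a vma actually containing addr must also check vm_start:
 *
 *	vma = find_vma(mm, addr);
 *	if (!vma || vma->vm_start > addr)
 *		... addr is not mapped ...
 */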
2052 * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
2054 struct vm_area_struct *
2055 find_vma_prev(struct mm_struct *mm, unsigned long addr,
2056 struct vm_area_struct **pprev)
2058 struct vm_area_struct *vma;
2060 vma = find_vma(mm, addr);
2061 if (vma) {
2062 *pprev = vma->vm_prev;
2063 } else {
2064 struct rb_node *rb_node = mm->mm_rb.rb_node;
2065 *pprev = NULL;
2066 while (rb_node) {
2067 *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
2068 rb_node = rb_node->rb_right;
2071 return vma;
2075 * Verify that the stack growth is acceptable and
2076 * update accounting. This is shared with both the
2077 * grow-up and grow-down cases.
2079 static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
2081 struct mm_struct *mm = vma->vm_mm;
2082 struct rlimit *rlim = current->signal->rlim;
2083 unsigned long new_start, actual_size;
2085 /* address space limit tests */
2086 if (!may_expand_vm(mm, grow))
2087 return -ENOMEM;
2089 /* Stack limit test */
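/*
 * Note (an assumption about the callers): for growable mappings the
 * expansion request includes the guard page, which should not count
 * against RLIMIT_STACK, hence one page is subtracted below.
 */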
2090 actual_size = size;
2091 if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
2092 actual_size -= PAGE_SIZE;
2093 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
2094 return -ENOMEM;
2096 /* mlock limit tests */
2097 if (vma->vm_flags & VM_LOCKED) {
2098 unsigned long locked;
2099 unsigned long limit;
2100 locked = mm->locked_vm + grow;
2101 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
2102 limit >>= PAGE_SHIFT;
2103 if (locked > limit && !capable(CAP_IPC_LOCK))
2104 return -ENOMEM;
2107 /* Check to ensure the stack will not grow into a hugetlb-only region */
2108 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
2109 vma->vm_end - size;
2110 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
2111 return -EFAULT;
2114 * Overcommit. This must be the final test, as it will
2115 * update security statistics.
2117 if (security_vm_enough_memory_mm(mm, grow))
2118 return -ENOMEM;
2120 /* Ok, everything looks good - let it rip */
2121 if (vma->vm_flags & VM_LOCKED)
2122 mm->locked_vm += grow;
2123 vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
2124 return 0;
2127 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
2129 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
2130 * vma is the last one with address > vma->vm_end. Have to extend vma.
2132 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2134 int error;
2136 if (!(vma->vm_flags & VM_GROWSUP))
2137 return -EFAULT;
2140 * We must make sure the anon_vma is allocated
2141 * so that the anon_vma locking is not a noop.
2143 if (unlikely(anon_vma_prepare(vma)))
2144 return -ENOMEM;
2145 vma_lock_anon_vma(vma);
2148 * vma->vm_start/vm_end cannot change under us because the caller
2149 * is required to hold the mmap_sem in read mode. We need the
2150 * anon_vma lock to serialize against concurrent expand_stacks.
2151 * Also guard against wrapping around to address 0.
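/* e.g. (illustrative, 4K pages) address = 0xfffffffffffff000 would make
 * PAGE_ALIGN() wrap to 0 here, so we fail with -ENOMEM below */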
2153 if (address < PAGE_ALIGN(address+4))
2154 address = PAGE_ALIGN(address+4);
2155 else {
2156 vma_unlock_anon_vma(vma);
2157 return -ENOMEM;
2159 error = 0;
2161 /* Somebody else might have raced and expanded it already */
2162 if (address > vma->vm_end) {
2163 unsigned long size, grow;
2165 size = address - vma->vm_start;
2166 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2168 error = -ENOMEM;
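/* Overflow check: fail unless vm_pgoff plus the page count still fits */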
2169 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2170 error = acct_stack_growth(vma, size, grow);
2171 if (!error) {
2173 * vma_gap_update() doesn't support concurrent
2174 * updates, but we only hold a shared mmap_sem
2175 * lock here, so we need to protect against
2176 * concurrent vma expansions.
2177 * vma_lock_anon_vma() doesn't help here, as
2178 * we don't guarantee that all growable vmas
2179 * in a mm share the same root anon vma.
2180 * So, we reuse mm->page_table_lock to guard
2181 * against concurrent vma expansions.
2183 spin_lock(&vma->vm_mm->page_table_lock);
2184 anon_vma_interval_tree_pre_update_vma(vma);
2185 vma->vm_end = address;
2186 anon_vma_interval_tree_post_update_vma(vma);
2187 if (vma->vm_next)
2188 vma_gap_update(vma->vm_next);
2189 else
2190 vma->vm_mm->highest_vm_end = address;
2191 spin_unlock(&vma->vm_mm->page_table_lock);
2193 perf_event_mmap(vma);
2197 vma_unlock_anon_vma(vma);
2198 khugepaged_enter_vma_merge(vma);
2199 validate_mm(vma->vm_mm);
2200 return error;
2202 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
2205 * vma is the first one with address < vma->vm_start. Have to extend vma.
2207 int expand_downwards(struct vm_area_struct *vma,
2208 unsigned long address)
2210 int error;
2211 #if defined(CONFIG_E2K) && defined(CONFIG_MAKE_ALL_PAGES_VALID)
2212 unsigned long start = vma->vm_start;
2213 #endif
2216 * We must make sure the anon_vma is allocated
2217 * so that the anon_vma locking is not a noop.
2219 if (unlikely(anon_vma_prepare(vma)))
2220 return -ENOMEM;
2222 address &= PAGE_MASK;
2223 error = security_mmap_addr(address);
2224 if (error)
2225 return error;
2227 vma_lock_anon_vma(vma);
2230 * vma->vm_start/vm_end cannot change under us because the caller
2231 * is required to hold the mmap_sem in read mode. We need the
2232 * anon_vma lock to serialize against concurrent expand_stacks.
2235 /* Somebody else might have raced and expanded it already */
2236 if (address < vma->vm_start) {
2237 unsigned long size, grow;
2239 size = vma->vm_end - address;
2240 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2242 error = -ENOMEM;
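/* Underflow check: vm_pgoff is about to shrink by grow pages below */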
2243 if (grow <= vma->vm_pgoff) {
2244 error = acct_stack_growth(vma, size, grow);
2245 if (!error) {
2247 * vma_gap_update() doesn't support concurrent
2248 * updates, but we only hold a shared mmap_sem
2249 * lock here, so we need to protect against
2250 * concurrent vma expansions.
2251 * vma_lock_anon_vma() doesn't help here, as
2252 * we don't guarantee that all growable vmas
2253 * in a mm share the same root anon vma.
2254 * So, we reuse mm->page_table_lock to guard
2255 * against concurrent vma expansions.
2257 spin_lock(&vma->vm_mm->page_table_lock);
2258 anon_vma_interval_tree_pre_update_vma(vma);
2259 vma->vm_start = address;
2260 vma->vm_pgoff -= grow;
2261 anon_vma_interval_tree_post_update_vma(vma);
2262 vma_gap_update(vma);
2263 spin_unlock(&vma->vm_mm->page_table_lock);
2265 perf_event_mmap(vma);
2269 vma_unlock_anon_vma(vma);
2270 khugepaged_enter_vma_merge(vma);
2271 validate_mm(vma->vm_mm);
2272 #if defined(CONFIG_E2K) && defined(CONFIG_MAKE_ALL_PAGES_VALID)
2273 if (!error && (vma->vm_flags & VM_PAGESVALID))
2274 error = make_vma_pages_valid(vma, address, start);
2275 #endif
2276 return error;
2280 * Note how expand_stack() refuses to expand the stack all the way to
2281 * abut the next virtual mapping, *unless* that mapping itself is also
2282 * a stack mapping. We want to leave room for a guard page, after all
2283 * (the guard page itself is not added here, that is done by the
2284 * actual page faulting logic)
2286 * This matches the behavior of the guard page logic (see mm/memory.c:
2287 * check_stack_guard_page()), which only allows the guard page to be
2288 * removed under these circumstances.
2290 #ifdef CONFIG_STACK_GROWSUP
2291 int expand_stack(struct vm_area_struct *vma, unsigned long address)
2293 struct vm_area_struct *next;
2295 address &= PAGE_MASK;
2296 next = vma->vm_next;
2297 if (next && next->vm_start == address + PAGE_SIZE) {
2298 if (!(next->vm_flags & VM_GROWSUP))
2299 return -ENOMEM;
2301 return expand_upwards(vma, address);
2304 struct vm_area_struct *
2305 find_extend_vma(struct mm_struct *mm, unsigned long addr)
2307 struct vm_area_struct *vma, *prev;
2309 addr &= PAGE_MASK;
2310 vma = find_vma_prev(mm, addr, &prev);
2311 if (vma && (vma->vm_start <= addr))
2312 return vma;
2313 if (!prev || expand_stack(prev, addr))
2314 return NULL;
2315 if (prev->vm_flags & VM_LOCKED)
2316 __mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
2317 return prev;
2319 #else
2320 int expand_stack(struct vm_area_struct *vma, unsigned long address)
2322 struct vm_area_struct *prev;
2324 address &= PAGE_MASK;
2325 prev = vma->vm_prev;
2326 if (prev && prev->vm_end == address) {
2327 if (!(prev->vm_flags & VM_GROWSDOWN))
2328 return -ENOMEM;
2330 return expand_downwards(vma, address);
2333 struct vm_area_struct *
2334 find_extend_vma(struct mm_struct *mm, unsigned long addr)
2336 struct vm_area_struct *vma;
2337 unsigned long start;
2339 addr &= PAGE_MASK;
2340 vma = find_vma(mm, addr);
2341 if (!vma)
2342 return NULL;
2343 if (vma->vm_start <= addr)
2344 return vma;
2345 if (!(vma->vm_flags & VM_GROWSDOWN))
2346 return NULL;
2347 start = vma->vm_start;
2348 if (expand_stack(vma, addr))
2349 return NULL;
2350 if (vma->vm_flags & VM_LOCKED)
2351 __mlock_vma_pages_range(vma, addr, start, NULL);
2352 return vma;
2354 #endif
2357 * Ok - we have the memory areas we should free on the vma list,
2358 * so release them, and do the vma updates.
2360 * Called with the mm semaphore held.
2362 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
2364 unsigned long nr_accounted = 0;
2366 /* Update high watermark before we lower total_vm */
2367 update_hiwater_vm(mm);
2368 do {
2369 long nrpages = vma_pages(vma);
2371 if (vma->vm_flags & VM_ACCOUNT)
2372 nr_accounted += nrpages;
2373 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
2374 vma = remove_vma(vma);
2375 } while (vma);
2376 vm_unacct_memory(nr_accounted);
2377 validate_mm(mm);
2381 * Get rid of page table information in the indicated region.
2383 * Called with the mm semaphore held.
2385 static void unmap_region(struct mm_struct *mm,
2386 struct vm_area_struct *vma, struct vm_area_struct *prev,
2387 unsigned long start, unsigned long end)
2389 struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
2390 struct mmu_gather tlb;
2392 lru_add_drain();
2393 tlb_gather_mmu(&tlb, mm, start, end);
2394 update_hiwater_rss(mm);
2395 unmap_vmas(&tlb, vma, start, end);
2396 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2397 next ? next->vm_start : USER_PGTABLES_CEILING);
2398 tlb_finish_mmu(&tlb, start, end);
2402 * Create a list of vmas touched by the unmap, removing them from the mm's
2403 * vma list as we go.
2405 static void
2406 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
2407 struct vm_area_struct *prev, unsigned long end)
2409 struct vm_area_struct **insertion_point;
2410 struct vm_area_struct *tail_vma = NULL;
2412 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
2413 vma->vm_prev = NULL;
2414 do {
2415 vma_rb_erase(vma, &mm->mm_rb);
2416 mm->map_count--;
2417 tail_vma = vma;
2418 vma = vma->vm_next;
2419 } while (vma && vma->vm_start < end);
2420 *insertion_point = vma;
2421 if (vma) {
2422 vma->vm_prev = prev;
2423 vma_gap_update(vma);
2424 } else
2425 mm->highest_vm_end = prev ? prev->vm_end : 0;
2426 tail_vma->vm_next = NULL;
2428 /* Kill the cache */
2429 vmacache_invalidate(mm);
2433 * __split_vma() bypasses sysctl_max_map_count checking. We use this on the
2434 * munmap path where it doesn't make sense to fail.
2436 static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2437 unsigned long addr, int new_below)
2439 struct vm_area_struct *new;
2440 int err = -ENOMEM;
2442 if (is_vm_hugetlb_page(vma) && (addr &
2443 ~(huge_page_mask(hstate_vma(vma)))))
2444 return -EINVAL;
2446 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2447 if (!new)
2448 goto out_err;
2450 /* most fields are the same, copy all, and then fixup */
2451 *new = *vma;
2453 INIT_LIST_HEAD(&new->anon_vma_chain);
2455 if (new_below)
2456 new->vm_end = addr;
2457 else {
2458 new->vm_start = addr;
2459 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2462 err = vma_dup_policy(vma, new);
2463 if (err)
2464 goto out_free_vma;
2466 err = anon_vma_clone(new, vma);
2467 if (err)
2468 goto out_free_mpol;
2470 if (new->vm_file)
2471 get_file(new->vm_file);
2473 if (new->vm_ops && new->vm_ops->open)
2474 new->vm_ops->open(new);
2476 if (new_below)
2477 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
2478 ((addr - new->vm_start) >> PAGE_SHIFT), new);
2479 else
2480 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
2482 /* Success. */
2483 if (!err)
2484 return 0;
2486 /* Clean everything up if vma_adjust failed. */
2487 if (new->vm_ops && new->vm_ops->close)
2488 new->vm_ops->close(new);
2489 if (new->vm_file)
2490 fput(new->vm_file);
2491 unlink_anon_vmas(new);
2492 out_free_mpol:
2493 mpol_put(vma_policy(new));
2494 out_free_vma:
2495 kmem_cache_free(vm_area_cachep, new);
2496 out_err:
2497 return err;
2501 * Split a vma into two pieces at address 'addr', a new vma is allocated
2502 * either for the first part or the tail.
2504 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2505 unsigned long addr, int new_below)
2507 if (mm->map_count >= sysctl_max_map_count)
2508 return -ENOMEM;
2510 return __split_vma(mm, vma, addr, new_below);
2513 /* Munmap is split into 2 main parts -- this part which finds
2514 * what needs doing, and the areas themselves, which do the
2515 * work. This now handles partial unmappings.
2516 * Jeremy Fitzhardinge <jeremy@goop.org>
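/*
 * Caller sketch (illustrative only): do_munmap() expects mmap_sem held
 * for writing; vm_munmap() below is the locked convenience wrapper:
 *
 *	down_write(&mm->mmap_sem);
 *	ret = do_munmap(mm, start, len);
 *	up_write(&mm->mmap_sem);
 */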
2518 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2520 unsigned long end;
2521 struct vm_area_struct *vma, *prev, *last;
2523 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
2524 return -EINVAL;
2526 if ((len = PAGE_ALIGN(len)) == 0)
2527 return -EINVAL;
2529 /* Find the first overlapping VMA */
2530 vma = find_vma(mm, start);
2531 if (!vma)
2532 return 0;
2533 prev = vma->vm_prev;
2534 /* we have start < vma->vm_end */
2536 /* if it doesn't overlap, we have nothing.. */
2537 end = start + len;
2538 if (vma->vm_start >= end)
2539 return 0;
2541 #ifdef CONFIG_E2K
2542 if (!test_ts_flag(TS_KERNEL_SYSCALL) &&
2543 __is_u_hw_stack_range(vma, start, start + len))
2544 return -EPERM;
2545 #endif
2548 * If we need to split any vma, do it now to save pain later.
2550 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2551 * unmapped vm_area_struct will remain in use: so lower split_vma
2552 * places tmp vma above, and higher split_vma places tmp vma below.
2554 if (start > vma->vm_start) {
2555 int error;
2558 * Make sure that map_count on return from munmap() will
2559 * not exceed its limit; but let map_count go just above
2560 * its limit temporarily, to help free resources as expected.
2562 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2563 return -ENOMEM;
2565 error = __split_vma(mm, vma, start, 0);
2566 if (error)
2567 return error;
2568 prev = vma;
2571 /* Does it split the last one? */
2572 last = find_vma(mm, end);
2573 if (last && end > last->vm_start) {
2574 int error = __split_vma(mm, last, end, 1);
2575 if (error)
2576 return error;
2578 vma = prev ? prev->vm_next : mm->mmap;
2581 * unlock any mlock()ed ranges before detaching vmas
2583 if (mm->locked_vm) {
2584 struct vm_area_struct *tmp = vma;
2585 while (tmp && tmp->vm_start < end) {
2586 if (tmp->vm_flags & VM_LOCKED) {
2587 mm->locked_vm -= vma_pages(tmp);
2588 munlock_vma_pages_all(tmp);
2590 tmp = tmp->vm_next;
2595 * Remove the vma's, and unmap the actual pages
2597 detach_vmas_to_be_unmapped(mm, vma, prev, end);
2598 unmap_region(mm, vma, prev, start, end);
2600 /* Fix up all other VM information */
2601 remove_vma_list(mm, vma);
2603 return 0;
2606 int vm_munmap(unsigned long start, size_t len)
2608 int ret;
2609 struct mm_struct *mm = current->mm;
2611 down_write(&mm->mmap_sem);
2612 ret = do_munmap(mm, start, len);
2613 up_write(&mm->mmap_sem);
2614 return ret;
2616 EXPORT_SYMBOL(vm_munmap);
2618 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2620 profile_munmap(addr);
2621 return vm_munmap(addr, len);
2624 static inline void verify_mm_writelocked(struct mm_struct *mm)
2626 #ifdef CONFIG_DEBUG_VM
2627 if (unlikely(down_read_trylock(&mm->mmap_sem))) {
2628 WARN_ON(1);
2629 up_read(&mm->mmap_sem);
2631 #endif
2635 * This is really a simplified "do_mmap". It only handles
2636 * anonymous maps. Eventually we may be able to do some
2637 * brk-specific accounting here.
2639 static unsigned long do_brk(unsigned long addr, unsigned long len)
2641 struct mm_struct *mm = current->mm;
2642 struct vm_area_struct *vma, *prev;
2643 unsigned long flags;
2644 struct rb_node **rb_link, *rb_parent;
2645 pgoff_t pgoff = addr >> PAGE_SHIFT;
2646 int error;
2648 len = PAGE_ALIGN(len);
2649 if (!len)
2650 return addr;
2652 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2654 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
2655 if (error & ~PAGE_MASK)
2656 return error;
2658 error = mlock_future_check(mm, mm->def_flags, len);
2659 if (error)
2660 return error;
2663 * mm->mmap_sem is required to protect against another thread
2664 * changing the mappings in case we sleep.
2666 verify_mm_writelocked(mm);
2669 * Clear old maps. This also does some error checking for us.
2671 munmap_back:
2672 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
2673 if (do_munmap(mm, addr, len))
2674 return -ENOMEM;
2675 goto munmap_back;
2678 /* Check against address space limits *after* clearing old maps... */
2679 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
2680 return -ENOMEM;
2682 if (mm->map_count > sysctl_max_map_count)
2683 return -ENOMEM;
2685 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
2686 return -ENOMEM;
2688 /* Can we just expand an old private anonymous mapping? */
2689 vma = vma_merge(mm, prev, addr, addr + len, flags,
2690 NULL, NULL, pgoff, NULL);
2691 if (vma)
2692 goto out;
2695 * create a vma struct for an anonymous mapping
2697 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2698 if (!vma) {
2699 vm_unacct_memory(len >> PAGE_SHIFT);
2700 return -ENOMEM;
2703 INIT_LIST_HEAD(&vma->anon_vma_chain);
2704 vma->vm_mm = mm;
2705 vma->vm_start = addr;
2706 vma->vm_end = addr + len;
2707 vma->vm_pgoff = pgoff;
2708 vma->vm_flags = flags;
2709 vma->vm_page_prot = vm_get_page_prot(flags);
2710 vma_link(mm, vma, prev, rb_link, rb_parent);
2711 out:
2712 perf_event_mmap(vma);
2713 mm->total_vm += len >> PAGE_SHIFT;
2714 if (flags & VM_LOCKED)
2715 mm->locked_vm += (len >> PAGE_SHIFT);
2716 vma->vm_flags |= VM_SOFTDIRTY;
2717 #if defined(CONFIG_E2K) && defined(CONFIG_MAKE_ALL_PAGES_VALID)
2718 if (flags & VM_PAGESVALID) {
2719 int ret;
2720 ret = make_vma_pages_valid(vma, addr, addr + len);
2721 if (ret) {
2722 do_munmap(mm, addr, len);
2723 return ret;
2726 #endif
2727 return addr;
2730 unsigned long vm_brk(unsigned long addr, unsigned long len)
2732 struct mm_struct *mm = current->mm;
2733 unsigned long ret;
2734 bool populate;
2736 down_write(&mm->mmap_sem);
2737 ret = do_brk(addr, len);
2738 populate = ((mm->def_flags & VM_LOCKED) != 0);
2739 up_write(&mm->mmap_sem);
2740 if (populate)
2741 mm_populate(addr, len);
2742 return ret;
2744 EXPORT_SYMBOL(vm_brk);
2746 /* Release all mmaps. */
2747 void exit_mmap(struct mm_struct *mm)
2749 struct mmu_gather tlb;
2750 struct vm_area_struct *vma;
2751 unsigned long nr_accounted = 0;
2753 /* mm's last user has gone, and it's about to be pulled down */
2754 mmu_notifier_release(mm);
2756 if (mm->locked_vm) {
2757 vma = mm->mmap;
2758 while (vma) {
2759 if (vma->vm_flags & VM_LOCKED)
2760 munlock_vma_pages_all(vma);
2761 vma = vma->vm_next;
2765 arch_exit_mmap(mm);
2767 vma = mm->mmap;
2768 if (!vma) /* Can happen if dup_mmap() received an OOM */
2769 return;
2771 lru_add_drain();
2772 flush_cache_mm(mm);
2773 tlb_gather_mmu(&tlb, mm, 0, -1);
2774 /* update_hiwater_rss(mm) here? but nobody should be looking */
2775 /* Use -1 here to ensure all VMAs in the mm are unmapped */
2776 unmap_vmas(&tlb, vma, 0, -1);
2778 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
2779 tlb_finish_mmu(&tlb, 0, -1);
2782 * Walk the list again, actually closing and freeing it,
2783 * with preemption enabled, without holding any MM locks.
2785 while (vma) {
2786 if (vma->vm_flags & VM_ACCOUNT)
2787 nr_accounted += vma_pages(vma);
2788 vma = remove_vma(vma);
2790 vm_unacct_memory(nr_accounted);
2792 WARN_ON(atomic_long_read(&mm->nr_ptes) >
2793 (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
2796 /* Insert vm structure into process list sorted by address
2797 * and into the inode's i_mmap tree. If vm_file is non-NULL
2798 * then i_mmap_mutex is taken here.
2800 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
2802 struct vm_area_struct *prev;
2803 struct rb_node **rb_link, *rb_parent;
2806 * The vm_pgoff of a purely anonymous vma should be irrelevant
2807 * until its first write fault, when page's anon_vma and index
2808 * are set. But now set the vm_pgoff it will almost certainly
2809 * end up with (unless mremap moves it elsewhere before that
2810 * first write fault), so /proc/pid/maps tells a consistent story.
2812 * By setting it to reflect the virtual start address of the
2813 * vma, merges and splits can happen in a seamless way, just
2814 * using the existing file pgoff checks and manipulations.
2815 * Similarly in do_mmap_pgoff and in do_brk.
2817 if (!vma->vm_file) {
2818 BUG_ON(vma->anon_vma);
2819 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2821 if (find_vma_links(mm, vma->vm_start, vma->vm_end,
2822 &prev, &rb_link, &rb_parent))
2823 return -ENOMEM;
2824 if ((vma->vm_flags & VM_ACCOUNT) &&
2825 security_vm_enough_memory_mm(mm, vma_pages(vma)))
2826 return -ENOMEM;
2828 vma_link(mm, vma, prev, rb_link, rb_parent);
2829 return 0;
2833 * Copy the vma structure to a new location in the same mm,
2834 * prior to moving page table entries, to effect an mremap move.
2836 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
2837 unsigned long addr, unsigned long len, pgoff_t pgoff,
2838 bool *need_rmap_locks)
2840 struct vm_area_struct *vma = *vmap;
2841 unsigned long vma_start = vma->vm_start;
2842 struct mm_struct *mm = vma->vm_mm;
2843 struct vm_area_struct *new_vma, *prev;
2844 struct rb_node **rb_link, *rb_parent;
2845 bool faulted_in_anon_vma = true;
2848 * If anonymous vma has not yet been faulted, update new pgoff
2849 * to match new location, to increase its chance of merging.
2851 if (unlikely(!vma->vm_file && !vma->anon_vma)) {
2852 pgoff = addr >> PAGE_SHIFT;
2853 faulted_in_anon_vma = false;
2856 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
2857 return NULL; /* should never get here */
2858 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2859 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
2860 if (new_vma) {
2862 * Source vma may have been merged into new_vma
2864 if (unlikely(vma_start >= new_vma->vm_start &&
2865 vma_start < new_vma->vm_end)) {
2867 * The only way we can get a vma_merge with
2868 * self during an mremap is if the vma hasn't
2869 * been faulted in yet and we were allowed to
2870 * reset the dst vma->vm_pgoff to the
2871 * destination address of the mremap to allow
2872 * the merge to happen. mremap must change the
2873 * vm_pgoff linearity between src and dst vmas
2874 * (in turn preventing a vma_merge) to be
2875 * safe. It is only safe to keep the vm_pgoff
2876 * linear if there are no pages mapped yet.
2878 VM_BUG_ON(faulted_in_anon_vma);
2879 *vmap = vma = new_vma;
2881 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
2882 } else {
2883 new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2884 if (new_vma) {
2885 *new_vma = *vma;
2886 new_vma->vm_start = addr;
2887 new_vma->vm_end = addr + len;
2888 new_vma->vm_pgoff = pgoff;
2889 if (vma_dup_policy(vma, new_vma))
2890 goto out_free_vma;
2891 INIT_LIST_HEAD(&new_vma->anon_vma_chain);
2892 if (anon_vma_clone(new_vma, vma))
2893 goto out_free_mempol;
2894 if (new_vma->vm_file)
2895 get_file(new_vma->vm_file);
2896 if (new_vma->vm_ops && new_vma->vm_ops->open)
2897 new_vma->vm_ops->open(new_vma);
2898 vma_link(mm, new_vma, prev, rb_link, rb_parent);
2899 *need_rmap_locks = false;
2902 return new_vma;
2904 out_free_mempol:
2905 mpol_put(vma_policy(new_vma));
2906 out_free_vma:
2907 kmem_cache_free(vm_area_cachep, new_vma);
2908 return NULL;
2912 * Return true if the calling process may expand its vm space by the passed
2913 * number of pages
2915 int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2917 unsigned long cur = mm->total_vm; /* pages */
2918 unsigned long lim;
2920 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
2922 if (cur + npages > lim)
2923 return 0;
2924 return 1;
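/*
 * Example (illustrative only): with RLIMIT_AS = 8GB and 4K pages,
 * lim = 8GB >> 12 = 2097152 pages; any request that would push
 * total_vm past that is refused.
 */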
2928 static int special_mapping_fault(struct vm_area_struct *vma,
2929 struct vm_fault *vmf)
2931 pgoff_t pgoff;
2932 struct page **pages;
2935 * special mappings have no vm_file, and in that case, the mm
2936 * uses vm_pgoff internally. So we have to subtract it from here.
2937 * We are allowed to do this because we are the mm; do not copy
2938 * this code into drivers!
2940 pgoff = vmf->pgoff - vma->vm_pgoff;
2942 for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
2943 pgoff--;
2945 if (*pages) {
2946 struct page *page = *pages;
2947 get_page(page);
2948 vmf->page = page;
2949 return 0;
2952 return VM_FAULT_SIGBUS;
2956 * Having a close hook prevents vma merging regardless of flags.
2958 static void special_mapping_close(struct vm_area_struct *vma)
2962 static const struct vm_operations_struct special_mapping_vmops = {
2963 .close = special_mapping_close,
2964 .fault = special_mapping_fault,
2968 * Called with mm->mmap_sem held for writing.
2969 * Insert a new vma covering the given region, with the given flags.
2970 * Its pages are supplied by the given array of struct page *.
2971 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
2972 * The region past the last page supplied will always produce SIGBUS.
2973 * The array pointer and the pages it points to are assumed to stay alive
2974 * for as long as this mapping might exist.
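/*
 * Usage sketch (illustrative only, names hypothetical): arch code
 * installs a vdso-like page roughly as follows:
 *
 *	static struct page *my_pages[2];	(zeroed => NULL-terminated)
 *	...
 *	ret = install_special_mapping(mm, addr, PAGE_SIZE,
 *			VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC, my_pages);
 */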
2976 int install_special_mapping(struct mm_struct *mm,
2977 unsigned long addr, unsigned long len,
2978 unsigned long vm_flags, struct page **pages)
2980 int ret;
2981 struct vm_area_struct *vma;
2983 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2984 if (unlikely(vma == NULL))
2985 return -ENOMEM;
2987 INIT_LIST_HEAD(&vma->anon_vma_chain);
2988 vma->vm_mm = mm;
2989 vma->vm_start = addr;
2990 vma->vm_end = addr + len;
2992 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
2993 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2995 vma->vm_ops = &special_mapping_vmops;
2996 vma->vm_private_data = pages;
2998 ret = insert_vm_struct(mm, vma);
2999 if (ret)
3000 goto out;
3002 mm->total_vm += len >> PAGE_SHIFT;
3004 perf_event_mmap(vma);
3006 #if defined(CONFIG_E2K) && defined(CONFIG_MAKE_ALL_PAGES_VALID)
3007 if (vm_flags & VM_PAGESVALID) {
3008 int ret = make_vma_pages_valid(vma, addr, addr + len);
3009 if (ret)
3010 return ret;
3012 #endif
3014 return 0;
3016 out:
3017 kmem_cache_free(vm_area_cachep, vma);
3018 return ret;
3021 static DEFINE_MUTEX(mm_all_locks_mutex);
3023 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3025 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
3027 * The LSB of head.next can't change from under us
3028 * because we hold the mm_all_locks_mutex.
3030 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
3032 * We can safely modify head.next after taking the
3033 * anon_vma->root->rwsem. If some other vma in this mm shares
3034 * the same anon_vma we won't take it again.
3036 * No need for atomic instructions here, head.next
3037 * can't change from under us thanks to the
3038 * anon_vma->root->rwsem.
3040 if (__test_and_set_bit(0, (unsigned long *)
3041 &anon_vma->root->rb_root.rb_node))
3042 BUG();
3046 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3048 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3050 * AS_MM_ALL_LOCKS can't change from under us because
3051 * we hold the mm_all_locks_mutex.
3053 * Operations on ->flags have to be atomic because
3054 * even if AS_MM_ALL_LOCKS is stable thanks to the
3055 * mm_all_locks_mutex, there may be other cpus
3056 * changing other bitflags in parallel to us.
3058 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3059 BUG();
3060 mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
3065 * This operation locks against the VM for all pte/vma/mm related
3066 * operations that could ever happen on a certain mm. This includes
3067 * vmtruncate, try_to_unmap, and all page faults.
3069 * The caller must take the mmap_sem in write mode before calling
3070 * mm_take_all_locks(). The caller isn't allowed to release the
3071 * mmap_sem until mm_drop_all_locks() returns.
3073 * mmap_sem in write mode is required in order to block all operations
3074 * that could modify pagetables and free pages without need of
3075 * altering the vma layout (for example populate_range() with
3076 * nonlinear vmas). It's also needed in write mode to prevent new
3077 * anon_vmas from being associated with existing vmas.
3079 * A single task can't take more than one mm_take_all_locks() in a row
3080 * or it would deadlock.
3082 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3083 * mapping->flags avoid taking the same lock twice, if more than one
3084 * vma in this mm is backed by the same anon_vma or address_space.
3086 * We can take all the locks in random order because the VM code
3087 * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
3088 * takes more than one of them in a row. Secondly we're protected
3089 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
3091 * mm_take_all_locks() and mm_drop_all_locks are expensive operations
3092 * that may have to take thousands of locks.
3094 * mm_take_all_locks() can fail if it's interrupted by signals.
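/*
 * Caller sketch (illustrative only): mmu notifier registration is the
 * typical user of this interface:
 *
 *	down_write(&mm->mmap_sem);
 *	if (!mm_take_all_locks(mm)) {
 *		... publish the new notifier ...
 *		mm_drop_all_locks(mm);
 *	}
 *	up_write(&mm->mmap_sem);
 */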
3096 int mm_take_all_locks(struct mm_struct *mm)
3098 struct vm_area_struct *vma;
3099 struct anon_vma_chain *avc;
3101 BUG_ON(down_read_trylock(&mm->mmap_sem));
3103 mutex_lock(&mm_all_locks_mutex);
3105 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3106 if (signal_pending(current))
3107 goto out_unlock;
3108 if (vma->vm_file && vma->vm_file->f_mapping)
3109 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3112 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3113 if (signal_pending(current))
3114 goto out_unlock;
3115 if (vma->anon_vma)
3116 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3117 vm_lock_anon_vma(mm, avc->anon_vma);
3120 return 0;
3122 out_unlock:
3123 mm_drop_all_locks(mm);
3124 return -EINTR;
3127 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3129 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
3131 * The LSB of head.next can't change to 0 from under
3132 * us because we hold the mm_all_locks_mutex.
3134 * We must however clear the bitflag before unlocking
3135 * the vma so the users using the anon_vma->rb_root will
3136 * never see our bitflag.
3138 * No need for atomic instructions here, head.next
3139 * can't change from under us until we release the
3140 * anon_vma->root->rwsem.
3142 if (!__test_and_clear_bit(0, (unsigned long *)
3143 &anon_vma->root->rb_root.rb_node))
3144 BUG();
3145 anon_vma_unlock_write(anon_vma);
3149 static void vm_unlock_mapping(struct address_space *mapping)
3151 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3153 * AS_MM_ALL_LOCKS can't change to 0 from under us
3154 * because we hold the mm_all_locks_mutex.
3156 mutex_unlock(&mapping->i_mmap_mutex);
3157 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3158 &mapping->flags))
3159 BUG();
3164 * The mmap_sem cannot be released by the caller until
3165 * mm_drop_all_locks() returns.
3167 void mm_drop_all_locks(struct mm_struct *mm)
3169 struct vm_area_struct *vma;
3170 struct anon_vma_chain *avc;
3172 BUG_ON(down_read_trylock(&mm->mmap_sem));
3173 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3175 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3176 if (vma->anon_vma)
3177 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3178 vm_unlock_anon_vma(avc->anon_vma);
3179 if (vma->vm_file && vma->vm_file->f_mapping)
3180 vm_unlock_mapping(vma->vm_file->f_mapping);
3183 mutex_unlock(&mm_all_locks_mutex);
3187 * Initialise the VMA slab.
3189 void __init mmap_init(void)
3191 int ret;
3193 ret = percpu_counter_init(&vm_committed_as, 0);
3194 VM_BUG_ON(ret);
3198 * Initialise sysctl_user_reserve_kbytes.
3200 * This is intended to prevent a user from starting a single memory hogging
3201 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
3202 * mode.
3204 * The default value is min(3% of free memory, 128MB)
3205 * 128MB is enough to recover with sshd/login, bash, and top/kill.
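/* e.g. (illustrative) 2GB free => 2097152kB / 32 = 65536kB (64MB) reserved */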
3207 static int init_user_reserve(void)
3209 unsigned long free_kbytes;
3211 free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3213 sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3214 return 0;
3216 subsys_initcall(init_user_reserve);
3219 * Initialise sysctl_admin_reserve_kbytes.
3221 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
3222 * to log in and kill a memory hogging process.
3224 * Systems with more than 256MB will reserve 8MB, enough to recover
3225 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
3226 * only reserve 3% of free pages by default.
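/* e.g. (illustrative) 128MB free => min(131072kB / 32, 8192kB) = 4096kB */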
3228 static int init_admin_reserve(void)
3230 unsigned long free_kbytes;
3232 free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3234 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3235 return 0;
3237 subsys_initcall(init_admin_reserve);
3240 * Reinitialise user and admin reserves if memory is added or removed.
3242 * The default user reserve max is 128MB, and the default max for the
3243 * admin reserve is 8MB. These are usually, but not always, enough to
3244 * enable recovery from a memory hogging process using login/sshd, a shell,
3245 * and tools like top. It may make sense to increase or even disable the
3246 * reserve depending on the existence of swap or variations in the recovery
3247 * tools. So, the admin may have changed them.
3249 * If memory is added and the reserves have been eliminated or increased above
3250 * the default max, then we'll trust the admin.
3252 * If memory is removed and there isn't enough free memory, then we
3253 * need to reset the reserves.
3255 * Otherwise keep the reserve set by the admin.
3257 static int reserve_mem_notifier(struct notifier_block *nb,
3258 unsigned long action, void *data)
3260 unsigned long tmp, free_kbytes;
3262 switch (action) {
3263 case MEM_ONLINE:
3264 /* Default max is 128MB. Leave alone if modified by operator. */
3265 tmp = sysctl_user_reserve_kbytes;
3266 if (0 < tmp && tmp < (1UL << 17))
3267 init_user_reserve();
3269 /* Default max is 8MB. Leave alone if modified by operator. */
3270 tmp = sysctl_admin_reserve_kbytes;
3271 if (0 < tmp && tmp < (1UL << 13))
3272 init_admin_reserve();
3274 break;
3275 case MEM_OFFLINE:
3276 free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3278 if (sysctl_user_reserve_kbytes > free_kbytes) {
3279 init_user_reserve();
3280 pr_info("vm.user_reserve_kbytes reset to %lu\n",
3281 sysctl_user_reserve_kbytes);
3284 if (sysctl_admin_reserve_kbytes > free_kbytes) {
3285 init_admin_reserve();
3286 pr_info("vm.admin_reserve_kbytes reset to %lu\n",
3287 sysctl_admin_reserve_kbytes);
3289 break;
3290 default:
3291 break;
3293 return NOTIFY_OK;
3296 static struct notifier_block reserve_mem_nb = {
3297 .notifier_call = reserve_mem_notifier,
3300 static int __meminit init_reserve_notifier(void)
3302 if (register_hotmemory_notifier(&reserve_mem_nb))
3303 printk(KERN_WARNING "Failed registering memory add/remove notifier for admin reserve\n");
3305 return 0;
3307 subsys_initcall(init_reserve_notifier);