1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * mm/mmap.c
5 * Written by obz.
7 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
8 */
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/backing-dev.h>
15 #include <linux/mm.h>
16 #include <linux/mm_inline.h>
17 #include <linux/shm.h>
18 #include <linux/mman.h>
19 #include <linux/pagemap.h>
20 #include <linux/swap.h>
21 #include <linux/syscalls.h>
22 #include <linux/capability.h>
23 #include <linux/init.h>
24 #include <linux/file.h>
25 #include <linux/fs.h>
26 #include <linux/personality.h>
27 #include <linux/security.h>
28 #include <linux/hugetlb.h>
29 #include <linux/shmem_fs.h>
30 #include <linux/profile.h>
31 #include <linux/export.h>
32 #include <linux/mount.h>
33 #include <linux/mempolicy.h>
34 #include <linux/rmap.h>
35 #include <linux/mmu_notifier.h>
36 #include <linux/mmdebug.h>
37 #include <linux/perf_event.h>
38 #include <linux/audit.h>
39 #include <linux/khugepaged.h>
40 #include <linux/uprobes.h>
41 #include <linux/notifier.h>
42 #include <linux/memory.h>
43 #include <linux/printk.h>
44 #include <linux/userfaultfd_k.h>
45 #include <linux/moduleparam.h>
46 #include <linux/pkeys.h>
47 #include <linux/oom.h>
48 #include <linux/sched/mm.h>
49 #include <linux/ksm.h>
51 #include <linux/uaccess.h>
52 #include <asm/cacheflush.h>
53 #include <asm/tlb.h>
54 #include <asm/mmu_context.h>
56 #define CREATE_TRACE_POINTS
57 #include <trace/events/mmap.h>
59 #include "internal.h"
61 #ifndef arch_mmap_check
62 #define arch_mmap_check(addr, len, flags) (0)
63 #endif
65 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
66 const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
67 int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
68 int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
69 #endif
70 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
71 const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
72 const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
73 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
74 #endif
76 static bool ignore_rlimit_data;
77 core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
79 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
80 void vma_set_page_prot(struct vm_area_struct *vma)
82 unsigned long vm_flags = vma->vm_flags;
83 pgprot_t vm_page_prot;
85 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
86 if (vma_wants_writenotify(vma, vm_page_prot)) {
87 vm_flags &= ~VM_SHARED;
88 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
90 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
91 WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
95 * check_brk_limits() - Use platform specific check of range & verify mlock
96 * limits.
97 * @addr: The address to check
98 * @len: The size of increase.
100 * Return: 0 on success.
102 static int check_brk_limits(unsigned long addr, unsigned long len)
104 unsigned long mapped_addr;
106 mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
107 if (IS_ERR_VALUE(mapped_addr))
108 return mapped_addr;
110 return mlock_future_ok(current->mm, current->mm->def_flags, len)
111 ? 0 : -EAGAIN;
113 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
114 unsigned long addr, unsigned long request, unsigned long flags);
115 SYSCALL_DEFINE1(brk, unsigned long, brk)
117 unsigned long newbrk, oldbrk, origbrk;
118 struct mm_struct *mm = current->mm;
119 struct vm_area_struct *brkvma, *next = NULL;
120 unsigned long min_brk;
121 bool populate = false;
122 LIST_HEAD(uf);
123 struct vma_iterator vmi;
125 if (mmap_write_lock_killable(mm))
126 return -EINTR;
128 origbrk = mm->brk;
130 #ifdef CONFIG_COMPAT_BRK
132 * CONFIG_COMPAT_BRK can still be overridden by setting
133 * randomize_va_space to 2, which will still cause mm->start_brk
134 * to be arbitrarily shifted
136 if (current->brk_randomized)
137 min_brk = mm->start_brk;
138 else
139 min_brk = mm->end_data;
140 #else
141 min_brk = mm->start_brk;
142 #endif
143 if (brk < min_brk)
144 goto out;
147 * Check against rlimit here. If this check is done later after the test
148 * of oldbrk with newbrk then it can escape the test and let the data
149 * segment grow beyond its set limit in the case where the limit is
150 * not page aligned -Ram Gupta
152 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
153 mm->end_data, mm->start_data))
154 goto out;
156 newbrk = PAGE_ALIGN(brk);
157 oldbrk = PAGE_ALIGN(mm->brk);
158 if (oldbrk == newbrk) {
159 mm->brk = brk;
160 goto success;
163 /* Always allow shrinking brk. */
164 if (brk <= mm->brk) {
165 /* Search one past newbrk */
166 vma_iter_init(&vmi, mm, newbrk);
167 brkvma = vma_find(&vmi, oldbrk);
168 if (!brkvma || brkvma->vm_start >= oldbrk)
169 goto out; /* mapping intersects with an existing non-brk vma. */
171 * mm->brk must be protected by write mmap_lock.
172 * do_vmi_align_munmap() will drop the lock on success, so
173 * update it before calling do_vmi_align_munmap().
175 mm->brk = brk;
176 if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
177 /* unlock = */ true))
178 goto out;
180 goto success_unlocked;
183 if (check_brk_limits(oldbrk, newbrk - oldbrk))
184 goto out;
187 * Only check if the next VMA is within the stack_guard_gap of the
188 * expansion area
190 vma_iter_init(&vmi, mm, oldbrk);
191 next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
192 if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
193 goto out;
195 brkvma = vma_prev_limit(&vmi, mm->start_brk);
196 /* Ok, looks good - let it rip. */
197 if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
198 goto out;
200 mm->brk = brk;
201 if (mm->def_flags & VM_LOCKED)
202 populate = true;
204 success:
205 mmap_write_unlock(mm);
206 success_unlocked:
207 userfaultfd_unmap_complete(mm, &uf);
208 if (populate)
209 mm_populate(oldbrk, newbrk - oldbrk);
210 return brk;
212 out:
213 mm->brk = origbrk;
214 mmap_write_unlock(mm);
215 return origbrk;
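/*
 * Illustrative userspace view of the syscall above (a hedged sketch, not
 * part of mm/mmap.c): glibc's brk()/sbrk() wrappers end up on this path.
 * The increment below is an arbitrary example value.
 *
 *	#include <unistd.h>
 *
 *	void *old = sbrk(0);                // query the current program break
 *	if (sbrk(4096) != (void *)-1) {     // ask the kernel to grow the heap
 *		((char *)old)[0] = 1;       // the newly added page is usable
 *		brk(old);                   // shrinking the brk is always allowed
 *	}
 */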
219 * If a hint addr is less than mmap_min_addr change hint to be as
220 * low as possible but still greater than mmap_min_addr
222 static inline unsigned long round_hint_to_min(unsigned long hint)
224 hint &= PAGE_MASK;
225 if (((void *)hint != NULL) &&
226 (hint < mmap_min_addr))
227 return PAGE_ALIGN(mmap_min_addr);
228 return hint;
231 bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
232 unsigned long bytes)
234 unsigned long locked_pages, limit_pages;
236 if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
237 return true;
239 locked_pages = bytes >> PAGE_SHIFT;
240 locked_pages += mm->locked_vm;
242 limit_pages = rlimit(RLIMIT_MEMLOCK);
243 limit_pages >>= PAGE_SHIFT;
245 return locked_pages <= limit_pages;
248 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
250 if (S_ISREG(inode->i_mode))
251 return MAX_LFS_FILESIZE;
253 if (S_ISBLK(inode->i_mode))
254 return MAX_LFS_FILESIZE;
256 if (S_ISSOCK(inode->i_mode))
257 return MAX_LFS_FILESIZE;
259 /* Special "we do even unsigned file positions" case */
260 if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET)
261 return 0;
263 /* Yes, random drivers might want more. But I'm tired of buggy drivers */
264 return ULONG_MAX;
267 static inline bool file_mmap_ok(struct file *file, struct inode *inode,
268 unsigned long pgoff, unsigned long len)
270 u64 maxsize = file_mmap_size_max(file, inode);
272 if (maxsize && len > maxsize)
273 return false;
274 maxsize -= len;
275 if (pgoff > maxsize >> PAGE_SHIFT)
276 return false;
277 return true;
281 * The caller must write-lock current->mm->mmap_lock.
283 unsigned long do_mmap(struct file *file, unsigned long addr,
284 unsigned long len, unsigned long prot,
285 unsigned long flags, vm_flags_t vm_flags,
286 unsigned long pgoff, unsigned long *populate,
287 struct list_head *uf)
289 struct mm_struct *mm = current->mm;
290 int pkey = 0;
292 *populate = 0;
294 if (!len)
295 return -EINVAL;
298 * Does the application expect PROT_READ to imply PROT_EXEC?
300 * (the exception is when the underlying filesystem is noexec
301 * mounted, in which case we don't add PROT_EXEC.)
303 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
304 if (!(file && path_noexec(&file->f_path)))
305 prot |= PROT_EXEC;
307 /* force arch specific MAP_FIXED handling in get_unmapped_area */
308 if (flags & MAP_FIXED_NOREPLACE)
309 flags |= MAP_FIXED;
311 if (!(flags & MAP_FIXED))
312 addr = round_hint_to_min(addr);
314 /* Careful about overflows.. */
315 len = PAGE_ALIGN(len);
316 if (!len)
317 return -ENOMEM;
319 /* offset overflow? */
320 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
321 return -EOVERFLOW;
323 /* Too many mappings? */
324 if (mm->map_count > sysctl_max_map_count)
325 return -ENOMEM;
328 * addr is returned from get_unmapped_area,
329 * There are two cases:
330 * 1> MAP_FIXED == false
331 * unallocated memory, no need to check sealing.
332 * 2> MAP_FIXED == true
333 * sealing is checked inside mmap_region when
334 * do_vmi_munmap is called.
337 if (prot == PROT_EXEC) {
338 pkey = execute_only_pkey(mm);
339 if (pkey < 0)
340 pkey = 0;
343 /* Do simple checking here so the lower-level routines won't have
344 * to. We assume access permissions have been handled by the open
345 * of the memory object, so we don't do any here.
347 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) |
348 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
350 /* Obtain the address to map to. We verify (or select) it and ensure
351 * that it represents a valid section of the address space.
353 addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags);
354 if (IS_ERR_VALUE(addr))
355 return addr;
357 if (flags & MAP_FIXED_NOREPLACE) {
358 if (find_vma_intersection(mm, addr, addr + len))
359 return -EEXIST;
362 if (flags & MAP_LOCKED)
363 if (!can_do_mlock())
364 return -EPERM;
366 if (!mlock_future_ok(mm, vm_flags, len))
367 return -EAGAIN;
369 if (file) {
370 struct inode *inode = file_inode(file);
371 unsigned long flags_mask;
373 if (!file_mmap_ok(file, inode, pgoff, len))
374 return -EOVERFLOW;
376 flags_mask = LEGACY_MAP_MASK;
377 if (file->f_op->fop_flags & FOP_MMAP_SYNC)
378 flags_mask |= MAP_SYNC;
380 switch (flags & MAP_TYPE) {
381 case MAP_SHARED:
383 * Force use of MAP_SHARED_VALIDATE with non-legacy
384 * flags. E.g. MAP_SYNC is dangerous to use with
385 * MAP_SHARED as you don't know which consistency model
386 * you will get. We silently ignore unsupported flags
387 * with MAP_SHARED to preserve backward compatibility.
389 flags &= LEGACY_MAP_MASK;
390 fallthrough;
391 case MAP_SHARED_VALIDATE:
392 if (flags & ~flags_mask)
393 return -EOPNOTSUPP;
394 if (prot & PROT_WRITE) {
395 if (!(file->f_mode & FMODE_WRITE))
396 return -EACCES;
397 if (IS_SWAPFILE(file->f_mapping->host))
398 return -ETXTBSY;
402 * Make sure we don't allow writing to an append-only
403 * file..
405 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
406 return -EACCES;
408 vm_flags |= VM_SHARED | VM_MAYSHARE;
409 if (!(file->f_mode & FMODE_WRITE))
410 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
411 fallthrough;
412 case MAP_PRIVATE:
413 if (!(file->f_mode & FMODE_READ))
414 return -EACCES;
415 if (path_noexec(&file->f_path)) {
416 if (vm_flags & VM_EXEC)
417 return -EPERM;
418 vm_flags &= ~VM_MAYEXEC;
421 if (!file->f_op->mmap)
422 return -ENODEV;
423 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
424 return -EINVAL;
425 break;
427 default:
428 return -EINVAL;
430 } else {
431 switch (flags & MAP_TYPE) {
432 case MAP_SHARED:
433 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
434 return -EINVAL;
436 * Ignore pgoff.
438 pgoff = 0;
439 vm_flags |= VM_SHARED | VM_MAYSHARE;
440 break;
441 case MAP_DROPPABLE:
442 if (VM_DROPPABLE == VM_NONE)
443 return -ENOTSUPP;
445 * It makes no sense for a locked or stack area to be droppable.
447 * Also, since droppable pages can just go away at any time
448 * it makes no sense to copy them on fork or dump them.
450 * And don't attempt to combine with hugetlb for now.
452 if (flags & (MAP_LOCKED | MAP_HUGETLB))
453 return -EINVAL;
454 if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
455 return -EINVAL;
457 vm_flags |= VM_DROPPABLE;
460 * If the pages can be dropped, then it doesn't make
461 * sense to reserve them.
463 vm_flags |= VM_NORESERVE;
466 * Likewise, they're volatile enough that they
467 * shouldn't survive forks or coredumps.
469 vm_flags |= VM_WIPEONFORK | VM_DONTDUMP;
470 fallthrough;
471 case MAP_PRIVATE:
473 * Set pgoff according to addr for anon_vma.
475 pgoff = addr >> PAGE_SHIFT;
476 break;
477 default:
478 return -EINVAL;
483 * Set 'VM_NORESERVE' if we should not account for the
484 * memory use of this mapping.
486 if (flags & MAP_NORESERVE) {
487 /* We honor MAP_NORESERVE if allowed to overcommit */
488 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
489 vm_flags |= VM_NORESERVE;
491 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
492 if (file && is_file_hugepages(file))
493 vm_flags |= VM_NORESERVE;
496 addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
497 if (!IS_ERR_VALUE(addr) &&
498 ((vm_flags & VM_LOCKED) ||
499 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
500 *populate = len;
501 return addr;
504 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
505 unsigned long prot, unsigned long flags,
506 unsigned long fd, unsigned long pgoff)
508 struct file *file = NULL;
509 unsigned long retval;
511 if (!(flags & MAP_ANONYMOUS)) {
512 audit_mmap_fd(fd, flags);
513 file = fget(fd);
514 if (!file)
515 return -EBADF;
516 if (is_file_hugepages(file)) {
517 len = ALIGN(len, huge_page_size(hstate_file(file)));
518 } else if (unlikely(flags & MAP_HUGETLB)) {
519 retval = -EINVAL;
520 goto out_fput;
522 } else if (flags & MAP_HUGETLB) {
523 struct hstate *hs;
525 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
526 if (!hs)
527 return -EINVAL;
529 len = ALIGN(len, huge_page_size(hs));
531 * VM_NORESERVE is used because the reservations will be
532 * taken when vm_ops->mmap() is called
534 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
535 VM_NORESERVE,
536 HUGETLB_ANONHUGE_INODE,
537 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
538 if (IS_ERR(file))
539 return PTR_ERR(file);
542 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
543 out_fput:
544 if (file)
545 fput(file);
546 return retval;
549 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
550 unsigned long, prot, unsigned long, flags,
551 unsigned long, fd, unsigned long, pgoff)
553 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
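/*
 * Hedged userspace sketch of the flag handling done in do_mmap() above
 * (not part of this file; the address and length are arbitrary examples):
 *
 *	#include <sys/mman.h>
 *	#include <errno.h>
 *
 *	// Unlike MAP_FIXED, MAP_FIXED_NOREPLACE refuses to clobber an
 *	// existing mapping and fails with EEXIST instead.
 *	void *p = mmap((void *)0x40000000UL, 4096,
 *		       PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
 *		       -1, 0);
 *	if (p == MAP_FAILED && errno == EEXIST)
 *		; // something already occupies that range
 */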
556 #ifdef __ARCH_WANT_SYS_OLD_MMAP
557 struct mmap_arg_struct {
558 unsigned long addr;
559 unsigned long len;
560 unsigned long prot;
561 unsigned long flags;
562 unsigned long fd;
563 unsigned long offset;
566 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
568 struct mmap_arg_struct a;
570 if (copy_from_user(&a, arg, sizeof(a)))
571 return -EFAULT;
572 if (offset_in_page(a.offset))
573 return -EINVAL;
575 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
576 a.offset >> PAGE_SHIFT);
578 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
581 * unmapped_area() - Find an area between the low_limit and the high_limit with
582 * the correct alignment and offset, all from @info. Note: current->mm is used
583 * for the search.
585 * @info: The unmapped area information including the range [low_limit -
586 * high_limit), the alignment offset and mask.
588 * Return: A memory address or -ENOMEM.
590 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
592 unsigned long length, gap;
593 unsigned long low_limit, high_limit;
594 struct vm_area_struct *tmp;
595 VMA_ITERATOR(vmi, current->mm, 0);
597 /* Adjust search length to account for worst case alignment overhead */
598 length = info->length + info->align_mask + info->start_gap;
599 if (length < info->length)
600 return -ENOMEM;
602 low_limit = info->low_limit;
603 if (low_limit < mmap_min_addr)
604 low_limit = mmap_min_addr;
605 high_limit = info->high_limit;
606 retry:
607 if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
608 return -ENOMEM;
611 * Adjust for the gap first so it doesn't interfere with the
612 * later alignment. The first step is the minimum needed to
613 * fulfill the start gap, the next step is the minimum to align
614 * that. It is the minimum needed to fulfill both.
616 gap = vma_iter_addr(&vmi) + info->start_gap;
617 gap += (info->align_offset - gap) & info->align_mask;
618 tmp = vma_next(&vmi);
619 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
620 if (vm_start_gap(tmp) < gap + length - 1) {
621 low_limit = tmp->vm_end;
622 vma_iter_reset(&vmi);
623 goto retry;
625 } else {
626 tmp = vma_prev(&vmi);
627 if (tmp && vm_end_gap(tmp) > gap) {
628 low_limit = vm_end_gap(tmp);
629 vma_iter_reset(&vmi);
630 goto retry;
634 return gap;
638 * unmapped_area_topdown() - Find an area between the low_limit and the
639 * high_limit with the correct alignment and offset at the highest available
640 * address, all from @info. Note: current->mm is used for the search.
642 * @info: The unmapped area information including the range [low_limit -
643 * high_limit), the alignment offset and mask.
645 * Return: A memory address or -ENOMEM.
647 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
649 unsigned long length, gap, gap_end;
650 unsigned long low_limit, high_limit;
651 struct vm_area_struct *tmp;
652 VMA_ITERATOR(vmi, current->mm, 0);
654 /* Adjust search length to account for worst case alignment overhead */
655 length = info->length + info->align_mask + info->start_gap;
656 if (length < info->length)
657 return -ENOMEM;
659 low_limit = info->low_limit;
660 if (low_limit < mmap_min_addr)
661 low_limit = mmap_min_addr;
662 high_limit = info->high_limit;
663 retry:
664 if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
665 return -ENOMEM;
667 gap = vma_iter_end(&vmi) - info->length;
668 gap -= (gap - info->align_offset) & info->align_mask;
669 gap_end = vma_iter_end(&vmi);
670 tmp = vma_next(&vmi);
671 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
672 if (vm_start_gap(tmp) < gap_end) {
673 high_limit = vm_start_gap(tmp);
674 vma_iter_reset(&vmi);
675 goto retry;
677 } else {
678 tmp = vma_prev(&vmi);
679 if (tmp && vm_end_gap(tmp) > gap) {
680 high_limit = tmp->vm_start;
681 vma_iter_reset(&vmi);
682 goto retry;
686 return gap;
690 * Determine if the allocation needs to ensure that there is no
691 * existing mapping within its guard gaps, for use as start_gap.
693 static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
695 if (vm_flags & VM_SHADOW_STACK)
696 return PAGE_SIZE;
698 return 0;
702 * Search for an unmapped address range.
704 * We are looking for a range that:
705 * - does not intersect with any VMA;
706 * - is contained within the [low_limit, high_limit) interval;
707 * - is at least the desired size.
708 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
710 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
712 unsigned long addr;
714 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
715 addr = unmapped_area_topdown(info);
716 else
717 addr = unmapped_area(info);
719 trace_vm_unmapped_area(addr, info);
720 return addr;
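/*
 * Hedged sketch of a caller filling struct vm_unmapped_area_info for a
 * plain bottom-up search (it mirrors generic_get_unmapped_area() below;
 * the limits shown are illustrative):
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.length = len;			// size of the mapping wanted
 *	info.low_limit = mm->mmap_base;		// search floor
 *	info.high_limit = TASK_SIZE;		// search ceiling (exclusive)
 *	info.align_mask = 0;			// no extra alignment needed
 *	info.align_offset = 0;
 *	info.start_gap = 0;			// no guard gap in front
 *	addr = vm_unmapped_area(&info);		// address or -ENOMEM
 */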
723 /* Get an address range which is currently unmapped.
724 * For shmat() with addr=0.
726 * Ugly calling convention alert:
727 * Return value with the low bits set means error value,
728 * ie
729 * if (ret & ~PAGE_MASK)
730 * error = ret;
732 * This function "knows" that -ENOMEM has the bits set.
734 unsigned long
735 generic_get_unmapped_area(struct file *filp, unsigned long addr,
736 unsigned long len, unsigned long pgoff,
737 unsigned long flags, vm_flags_t vm_flags)
739 struct mm_struct *mm = current->mm;
740 struct vm_area_struct *vma, *prev;
741 struct vm_unmapped_area_info info = {};
742 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
744 if (len > mmap_end - mmap_min_addr)
745 return -ENOMEM;
747 if (flags & MAP_FIXED)
748 return addr;
750 if (addr) {
751 addr = PAGE_ALIGN(addr);
752 vma = find_vma_prev(mm, addr, &prev);
753 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
754 (!vma || addr + len <= vm_start_gap(vma)) &&
755 (!prev || addr >= vm_end_gap(prev)))
756 return addr;
759 info.length = len;
760 info.low_limit = mm->mmap_base;
761 info.high_limit = mmap_end;
762 info.start_gap = stack_guard_placement(vm_flags);
763 if (filp && is_file_hugepages(filp))
764 info.align_mask = huge_page_mask_align(filp);
765 return vm_unmapped_area(&info);
768 #ifndef HAVE_ARCH_UNMAPPED_AREA
769 unsigned long
770 arch_get_unmapped_area(struct file *filp, unsigned long addr,
771 unsigned long len, unsigned long pgoff,
772 unsigned long flags, vm_flags_t vm_flags)
774 return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
775 vm_flags);
777 #endif
780 * This mmap-allocator allocates new areas top-down from below the
781 * stack's low limit (the base):
783 unsigned long
784 generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
785 unsigned long len, unsigned long pgoff,
786 unsigned long flags, vm_flags_t vm_flags)
788 struct vm_area_struct *vma, *prev;
789 struct mm_struct *mm = current->mm;
790 struct vm_unmapped_area_info info = {};
791 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
793 /* requested length too big for entire address space */
794 if (len > mmap_end - mmap_min_addr)
795 return -ENOMEM;
797 if (flags & MAP_FIXED)
798 return addr;
800 /* requesting a specific address */
801 if (addr) {
802 addr = PAGE_ALIGN(addr);
803 vma = find_vma_prev(mm, addr, &prev);
804 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
805 (!vma || addr + len <= vm_start_gap(vma)) &&
806 (!prev || addr >= vm_end_gap(prev)))
807 return addr;
810 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
811 info.length = len;
812 info.low_limit = PAGE_SIZE;
813 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
814 info.start_gap = stack_guard_placement(vm_flags);
815 if (filp && is_file_hugepages(filp))
816 info.align_mask = huge_page_mask_align(filp);
817 addr = vm_unmapped_area(&info);
820 * A failed mmap() very likely causes application failure,
821 * so fall back to the bottom-up function here. This scenario
822 * can happen with large stack limits and large mmap()
823 * allocations.
825 if (offset_in_page(addr)) {
826 VM_BUG_ON(addr != -ENOMEM);
827 info.flags = 0;
828 info.low_limit = TASK_UNMAPPED_BASE;
829 info.high_limit = mmap_end;
830 addr = vm_unmapped_area(&info);
833 return addr;
836 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
837 unsigned long
838 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
839 unsigned long len, unsigned long pgoff,
840 unsigned long flags, vm_flags_t vm_flags)
842 return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
843 vm_flags);
845 #endif
847 unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp,
848 unsigned long addr, unsigned long len,
849 unsigned long pgoff, unsigned long flags,
850 vm_flags_t vm_flags)
852 if (test_bit(MMF_TOPDOWN, &mm->flags))
853 return arch_get_unmapped_area_topdown(filp, addr, len, pgoff,
854 flags, vm_flags);
855 return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
858 unsigned long
859 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
860 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
862 unsigned long (*get_area)(struct file *, unsigned long,
863 unsigned long, unsigned long, unsigned long)
864 = NULL;
866 unsigned long error = arch_mmap_check(addr, len, flags);
867 if (error)
868 return error;
870 /* Careful about overflows.. */
871 if (len > TASK_SIZE)
872 return -ENOMEM;
874 if (file) {
875 if (file->f_op->get_unmapped_area)
876 get_area = file->f_op->get_unmapped_area;
877 } else if (flags & MAP_SHARED) {
879 * mmap_region() will call shmem_zero_setup() to create a file,
880 * so use shmem's get_unmapped_area in case it can be huge.
882 get_area = shmem_get_unmapped_area;
885 /* Always treat pgoff as zero for anonymous memory. */
886 if (!file)
887 pgoff = 0;
889 if (get_area) {
890 addr = get_area(file, addr, len, pgoff, flags);
891 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)
892 && IS_ALIGNED(len, PMD_SIZE)) {
893 /* Ensures that larger anonymous mappings are THP aligned. */
894 addr = thp_get_unmapped_area_vmflags(file, addr, len,
895 pgoff, flags, vm_flags);
896 } else {
897 addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
898 pgoff, flags, vm_flags);
900 if (IS_ERR_VALUE(addr))
901 return addr;
903 if (addr > TASK_SIZE - len)
904 return -ENOMEM;
905 if (offset_in_page(addr))
906 return -EINVAL;
908 error = security_mmap_addr(addr);
909 return error ? error : addr;
912 unsigned long
913 mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
914 unsigned long addr, unsigned long len,
915 unsigned long pgoff, unsigned long flags)
917 if (test_bit(MMF_TOPDOWN, &mm->flags))
918 return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags, 0);
919 return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0);
921 EXPORT_SYMBOL(mm_get_unmapped_area);
924 * find_vma_intersection() - Look up the first VMA which intersects the interval
925 * @mm: The process address space.
926 * @start_addr: The inclusive start user address.
927 * @end_addr: The exclusive end user address.
929 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
930 * start_addr < end_addr.
932 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
933 unsigned long start_addr,
934 unsigned long end_addr)
936 unsigned long index = start_addr;
938 mmap_assert_locked(mm);
939 return mt_find(&mm->mm_mt, &index, end_addr - 1);
941 EXPORT_SYMBOL(find_vma_intersection);
944 * find_vma() - Find the VMA for a given address, or the next VMA.
945 * @mm: The mm_struct to check
946 * @addr: The address
948 * Returns: The VMA associated with addr, or the next VMA.
949 * May return %NULL in the case of no VMA at addr or above.
951 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
953 unsigned long index = addr;
955 mmap_assert_locked(mm);
956 return mt_find(&mm->mm_mt, &index, ULONG_MAX);
958 EXPORT_SYMBOL(find_vma);
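/*
 * Hedged usage sketch: the lookups above assert that mmap_lock is held;
 * a read lock is sufficient for a pure lookup. Remember that find_vma()
 * may return the next VMA *above* addr, so containment must be checked:
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		; // addr really lies inside vma
 *	mmap_read_unlock(mm);
 */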
961 * find_vma_prev() - Find the VMA for a given address, or the next vma and
962 * set %pprev to the previous VMA, if any.
963 * @mm: The mm_struct to check
964 * @addr: The address
965 * @pprev: The pointer to set to the previous VMA
967 * Note that RCU lock is missing here since the external mmap_lock() is used
968 * instead.
970 * Returns: The VMA associated with @addr, or the next vma.
971 * May return %NULL in the case of no vma at addr or above.
973 struct vm_area_struct *
974 find_vma_prev(struct mm_struct *mm, unsigned long addr,
975 struct vm_area_struct **pprev)
977 struct vm_area_struct *vma;
978 VMA_ITERATOR(vmi, mm, addr);
980 vma = vma_iter_load(&vmi);
981 *pprev = vma_prev(&vmi);
982 if (!vma)
983 vma = vma_next(&vmi);
984 return vma;
988 * Verify that the stack growth is acceptable and
989 * update accounting. This is shared with both the
990 * grow-up and grow-down cases.
992 static int acct_stack_growth(struct vm_area_struct *vma,
993 unsigned long size, unsigned long grow)
995 struct mm_struct *mm = vma->vm_mm;
996 unsigned long new_start;
998 /* address space limit tests */
999 if (!may_expand_vm(mm, vma->vm_flags, grow))
1000 return -ENOMEM;
1002 /* Stack limit test */
1003 if (size > rlimit(RLIMIT_STACK))
1004 return -ENOMEM;
1006 /* mlock limit tests */
1007 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
1008 return -ENOMEM;
1010 /* Check to ensure the stack will not grow into a hugetlb-only region */
1011 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1012 vma->vm_end - size;
1013 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1014 return -EFAULT;
1017 * Overcommit.. This must be the final test, as it will
1018 * update security statistics.
1020 if (security_vm_enough_memory_mm(mm, grow))
1021 return -ENOMEM;
1023 return 0;
1026 #if defined(CONFIG_STACK_GROWSUP)
1028 * PA-RISC uses this for its stack.
1029 * vma is the last one with address > vma->vm_end. Have to extend vma.
1031 static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1033 struct mm_struct *mm = vma->vm_mm;
1034 struct vm_area_struct *next;
1035 unsigned long gap_addr;
1036 int error = 0;
1037 VMA_ITERATOR(vmi, mm, vma->vm_start);
1039 if (!(vma->vm_flags & VM_GROWSUP))
1040 return -EFAULT;
1042 mmap_assert_write_locked(mm);
1044 /* Guard against exceeding limits of the address space. */
1045 address &= PAGE_MASK;
1046 if (address >= (TASK_SIZE & PAGE_MASK))
1047 return -ENOMEM;
1048 address += PAGE_SIZE;
1050 /* Enforce stack_guard_gap */
1051 gap_addr = address + stack_guard_gap;
1053 /* Guard against overflow */
1054 if (gap_addr < address || gap_addr > TASK_SIZE)
1055 gap_addr = TASK_SIZE;
1057 next = find_vma_intersection(mm, vma->vm_end, gap_addr);
1058 if (next && vma_is_accessible(next)) {
1059 if (!(next->vm_flags & VM_GROWSUP))
1060 return -ENOMEM;
1061 /* Check that both stack segments have the same anon_vma? */
1064 if (next)
1065 vma_iter_prev_range_limit(&vmi, address);
1067 vma_iter_config(&vmi, vma->vm_start, address);
1068 if (vma_iter_prealloc(&vmi, vma))
1069 return -ENOMEM;
1071 /* We must make sure the anon_vma is allocated. */
1072 if (unlikely(anon_vma_prepare(vma))) {
1073 vma_iter_free(&vmi);
1074 return -ENOMEM;
1077 /* Lock the VMA before expanding to prevent concurrent page faults */
1078 vma_start_write(vma);
1079 /* We update the anon VMA tree. */
1080 anon_vma_lock_write(vma->anon_vma);
1082 /* Somebody else might have raced and expanded it already */
1083 if (address > vma->vm_end) {
1084 unsigned long size, grow;
1086 size = address - vma->vm_start;
1087 grow = (address - vma->vm_end) >> PAGE_SHIFT;
1089 error = -ENOMEM;
1090 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
1091 error = acct_stack_growth(vma, size, grow);
1092 if (!error) {
1093 if (vma->vm_flags & VM_LOCKED)
1094 mm->locked_vm += grow;
1095 vm_stat_account(mm, vma->vm_flags, grow);
1096 anon_vma_interval_tree_pre_update_vma(vma);
1097 vma->vm_end = address;
1098 /* Overwrite old entry in mtree. */
1099 vma_iter_store(&vmi, vma);
1100 anon_vma_interval_tree_post_update_vma(vma);
1102 perf_event_mmap(vma);
1106 anon_vma_unlock_write(vma->anon_vma);
1107 vma_iter_free(&vmi);
1108 validate_mm(mm);
1109 return error;
1111 #endif /* CONFIG_STACK_GROWSUP */
1114 * vma is the first one with address < vma->vm_start. Have to extend vma.
1115 * mmap_lock held for writing.
1117 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
1119 struct mm_struct *mm = vma->vm_mm;
1120 struct vm_area_struct *prev;
1121 int error = 0;
1122 VMA_ITERATOR(vmi, mm, vma->vm_start);
1124 if (!(vma->vm_flags & VM_GROWSDOWN))
1125 return -EFAULT;
1127 mmap_assert_write_locked(mm);
1129 address &= PAGE_MASK;
1130 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
1131 return -EPERM;
1133 /* Enforce stack_guard_gap */
1134 prev = vma_prev(&vmi);
1135 /* Check that both stack segments have the same anon_vma? */
1136 if (prev) {
1137 if (!(prev->vm_flags & VM_GROWSDOWN) &&
1138 vma_is_accessible(prev) &&
1139 (address - prev->vm_end < stack_guard_gap))
1140 return -ENOMEM;
1143 if (prev)
1144 vma_iter_next_range_limit(&vmi, vma->vm_start);
1146 vma_iter_config(&vmi, address, vma->vm_end);
1147 if (vma_iter_prealloc(&vmi, vma))
1148 return -ENOMEM;
1150 /* We must make sure the anon_vma is allocated. */
1151 if (unlikely(anon_vma_prepare(vma))) {
1152 vma_iter_free(&vmi);
1153 return -ENOMEM;
1156 /* Lock the VMA before expanding to prevent concurrent page faults */
1157 vma_start_write(vma);
1158 /* We update the anon VMA tree. */
1159 anon_vma_lock_write(vma->anon_vma);
1161 /* Somebody else might have raced and expanded it already */
1162 if (address < vma->vm_start) {
1163 unsigned long size, grow;
1165 size = vma->vm_end - address;
1166 grow = (vma->vm_start - address) >> PAGE_SHIFT;
1168 error = -ENOMEM;
1169 if (grow <= vma->vm_pgoff) {
1170 error = acct_stack_growth(vma, size, grow);
1171 if (!error) {
1172 if (vma->vm_flags & VM_LOCKED)
1173 mm->locked_vm += grow;
1174 vm_stat_account(mm, vma->vm_flags, grow);
1175 anon_vma_interval_tree_pre_update_vma(vma);
1176 vma->vm_start = address;
1177 vma->vm_pgoff -= grow;
1178 /* Overwrite old entry in mtree. */
1179 vma_iter_store(&vmi, vma);
1180 anon_vma_interval_tree_post_update_vma(vma);
1182 perf_event_mmap(vma);
1186 anon_vma_unlock_write(vma->anon_vma);
1187 vma_iter_free(&vmi);
1188 validate_mm(mm);
1189 return error;
1192 /* enforced gap between the expanding stack and other mappings. */
1193 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
1195 static int __init cmdline_parse_stack_guard_gap(char *p)
1197 unsigned long val;
1198 char *endptr;
1200 val = simple_strtoul(p, &endptr, 10);
1201 if (!*endptr)
1202 stack_guard_gap = val << PAGE_SHIFT;
1204 return 1;
1206 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
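/*
 * Boot-time usage sketch: the value is given in pages and shifted by
 * PAGE_SHIFT above, so e.g. "stack_guard_gap=1024" on the kernel command
 * line widens the gap from the default 256 pages to 1024 pages.
 */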
1208 #ifdef CONFIG_STACK_GROWSUP
1209 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
1211 return expand_upwards(vma, address);
1214 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
1216 struct vm_area_struct *vma, *prev;
1218 addr &= PAGE_MASK;
1219 vma = find_vma_prev(mm, addr, &prev);
1220 if (vma && (vma->vm_start <= addr))
1221 return vma;
1222 if (!prev)
1223 return NULL;
1224 if (expand_stack_locked(prev, addr))
1225 return NULL;
1226 if (prev->vm_flags & VM_LOCKED)
1227 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
1228 return prev;
1230 #else
1231 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
1233 return expand_downwards(vma, address);
1236 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
1238 struct vm_area_struct *vma;
1239 unsigned long start;
1241 addr &= PAGE_MASK;
1242 vma = find_vma(mm, addr);
1243 if (!vma)
1244 return NULL;
1245 if (vma->vm_start <= addr)
1246 return vma;
1247 start = vma->vm_start;
1248 if (expand_stack_locked(vma, addr))
1249 return NULL;
1250 if (vma->vm_flags & VM_LOCKED)
1251 populate_vma_page_range(vma, addr, start, NULL);
1252 return vma;
1254 #endif
1256 #if defined(CONFIG_STACK_GROWSUP)
1258 #define vma_expand_up(vma, addr) expand_upwards(vma, addr)
1259 #define vma_expand_down(vma, addr) (-EFAULT)
1261 #else
1263 #define vma_expand_up(vma, addr) (-EFAULT)
1264 #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
1266 #endif
1269 * expand_stack(): legacy interface for page faulting. Don't use unless
1270 * you have to.
1272 * This is called with the mm locked for reading, drops the lock, takes
1273 * the lock for writing, tries to look up a vma again, expands it if
1274 * necessary, and downgrades the lock to reading again.
1276 * If no vma is found or it can't be expanded, it returns NULL and has
1277 * dropped the lock.
1279 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
1281 struct vm_area_struct *vma, *prev;
1283 mmap_read_unlock(mm);
1284 if (mmap_write_lock_killable(mm))
1285 return NULL;
1287 vma = find_vma_prev(mm, addr, &prev);
1288 if (vma && vma->vm_start <= addr)
1289 goto success;
1291 if (prev && !vma_expand_up(prev, addr)) {
1292 vma = prev;
1293 goto success;
1296 if (vma && !vma_expand_down(vma, addr))
1297 goto success;
1299 mmap_write_unlock(mm);
1300 return NULL;
1302 success:
1303 mmap_write_downgrade(mm);
1304 return vma;
1307 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
1308 * @mm: The mm_struct
1309 * @start: The start address to munmap
1310 * @len: The length to be munmapped.
1311 * @uf: The userfaultfd list_head
1313 * Return: 0 on success, error otherwise.
1315 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
1316 struct list_head *uf)
1318 VMA_ITERATOR(vmi, mm, start);
1320 return do_vmi_munmap(&vmi, mm, start, len, uf, false);
1323 unsigned long mmap_region(struct file *file, unsigned long addr,
1324 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
1325 struct list_head *uf)
1327 unsigned long ret;
1328 bool writable_file_mapping = false;
1330 /* Check to see if MDWE is applicable. */
1331 if (map_deny_write_exec(vm_flags, vm_flags))
1332 return -EACCES;
1334 /* Allow architectures to sanity-check the vm_flags. */
1335 if (!arch_validate_flags(vm_flags))
1336 return -EINVAL;
1338 /* Map writable and ensure this isn't a sealed memfd. */
1339 if (file && is_shared_maywrite(vm_flags)) {
1340 int error = mapping_map_writable(file->f_mapping);
1342 if (error)
1343 return error;
1344 writable_file_mapping = true;
1347 ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
1349 /* Clear our write mapping regardless of error. */
1350 if (writable_file_mapping)
1351 mapping_unmap_writable(file->f_mapping);
1353 validate_mm(current->mm);
1354 return ret;
1357 static int __vm_munmap(unsigned long start, size_t len, bool unlock)
1359 int ret;
1360 struct mm_struct *mm = current->mm;
1361 LIST_HEAD(uf);
1362 VMA_ITERATOR(vmi, mm, start);
1364 if (mmap_write_lock_killable(mm))
1365 return -EINTR;
1367 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
1368 if (ret || !unlock)
1369 mmap_write_unlock(mm);
1371 userfaultfd_unmap_complete(mm, &uf);
1372 return ret;
1375 int vm_munmap(unsigned long start, size_t len)
1377 return __vm_munmap(start, len, false);
1379 EXPORT_SYMBOL(vm_munmap);
1381 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1383 addr = untagged_addr(addr);
1384 return __vm_munmap(addr, len, true);
1389 * Emulation of deprecated remap_file_pages() syscall.
1391 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
1392 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
1395 struct mm_struct *mm = current->mm;
1396 struct vm_area_struct *vma;
1397 unsigned long populate = 0;
1398 unsigned long ret = -EINVAL;
1399 struct file *file;
1400 vm_flags_t vm_flags;
1402 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
1403 current->comm, current->pid);
1405 if (prot)
1406 return ret;
1407 start = start & PAGE_MASK;
1408 size = size & PAGE_MASK;
1410 if (start + size <= start)
1411 return ret;
1413 /* Does pgoff wrap? */
1414 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
1415 return ret;
1417 if (mmap_read_lock_killable(mm))
1418 return -EINTR;
1421 * Look up VMA under read lock first so we can perform the security check
1422 * without holding locks (which can be problematic). We reacquire a
1423 * write lock later and check nothing changed underneath us.
1425 vma = vma_lookup(mm, start);
1427 if (!vma || !(vma->vm_flags & VM_SHARED)) {
1428 mmap_read_unlock(mm);
1429 return -EINVAL;
1432 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
1433 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
1434 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
1436 flags &= MAP_NONBLOCK;
1437 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
1438 if (vma->vm_flags & VM_LOCKED)
1439 flags |= MAP_LOCKED;
1441 /* Save vm_flags used to calculate prot and flags, and recheck later. */
1442 vm_flags = vma->vm_flags;
1443 file = get_file(vma->vm_file);
1445 mmap_read_unlock(mm);
1447 /* Call outside mmap_lock to be consistent with other callers. */
1448 ret = security_mmap_file(file, prot, flags);
1449 if (ret) {
1450 fput(file);
1451 return ret;
1454 ret = -EINVAL;
1456 /* OK security check passed, take write lock + let it rip. */
1457 if (mmap_write_lock_killable(mm)) {
1458 fput(file);
1459 return -EINTR;
1462 vma = vma_lookup(mm, start);
1464 if (!vma)
1465 goto out;
1467 /* Make sure things didn't change under us. */
1468 if (vma->vm_flags != vm_flags)
1469 goto out;
1470 if (vma->vm_file != file)
1471 goto out;
1473 if (start + size > vma->vm_end) {
1474 VMA_ITERATOR(vmi, mm, vma->vm_end);
1475 struct vm_area_struct *next, *prev = vma;
1477 for_each_vma_range(vmi, next, start + size) {
1478 /* hole between vmas ? */
1479 if (next->vm_start != prev->vm_end)
1480 goto out;
1482 if (next->vm_file != vma->vm_file)
1483 goto out;
1485 if (next->vm_flags != vma->vm_flags)
1486 goto out;
1488 if (start + size <= next->vm_end)
1489 break;
1491 prev = next;
1494 if (!next)
1495 goto out;
1498 ret = do_mmap(vma->vm_file, start, size,
1499 prot, flags, 0, pgoff, &populate, NULL);
1500 out:
1501 mmap_write_unlock(mm);
1502 fput(file);
1503 if (populate)
1504 mm_populate(ret, populate);
1505 if (!IS_ERR_VALUE(ret))
1506 ret = 0;
1507 return ret;
1511 * do_brk_flags() - Increase the brk vma if the flags match.
1512 * @vmi: The vma iterator
1513 * @addr: The start address
1514 * @len: The length of the increase
1515 * @vma: The brk vma to expand, or NULL
1516 * @flags: The VMA Flags
1518 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
1519 * do not match then create a new anonymous VMA. Eventually we may be able to
1520 * do some brk-specific accounting here.
1522 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
1523 unsigned long addr, unsigned long len, unsigned long flags)
1525 struct mm_struct *mm = current->mm;
1528 * Check against address space limits by the changed size
1529 * Note: This happens *after* clearing old mappings in some code paths.
1531 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
1532 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
1533 return -ENOMEM;
1535 if (mm->map_count > sysctl_max_map_count)
1536 return -ENOMEM;
1538 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
1539 return -ENOMEM;
1542 * Expand the existing vma if possible; Note that singular lists do not
1543 * occur after forking, so the expand will only happen on new VMAs.
1545 if (vma && vma->vm_end == addr) {
1546 VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));
1548 vmg.prev = vma;
1549 /* vmi is positioned at prev, which this mode expects. */
1550 vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
1552 if (vma_merge_new_range(&vmg))
1553 goto out;
1554 else if (vmg_nomem(&vmg))
1555 goto unacct_fail;
1558 if (vma)
1559 vma_iter_next_range(vmi);
1560 /* create a vma struct for an anonymous mapping */
1561 vma = vm_area_alloc(mm);
1562 if (!vma)
1563 goto unacct_fail;
1565 vma_set_anonymous(vma);
1566 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
1567 vm_flags_init(vma, flags);
1568 vma->vm_page_prot = vm_get_page_prot(flags);
1569 vma_start_write(vma);
1570 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
1571 goto mas_store_fail;
1573 mm->map_count++;
1574 validate_mm(mm);
1575 ksm_add_vma(vma);
1576 out:
1577 perf_event_mmap(vma);
1578 mm->total_vm += len >> PAGE_SHIFT;
1579 mm->data_vm += len >> PAGE_SHIFT;
1580 if (flags & VM_LOCKED)
1581 mm->locked_vm += (len >> PAGE_SHIFT);
1582 vm_flags_set(vma, VM_SOFTDIRTY);
1583 return 0;
1585 mas_store_fail:
1586 vm_area_free(vma);
1587 unacct_fail:
1588 vm_unacct_memory(len >> PAGE_SHIFT);
1589 return -ENOMEM;
1592 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
1594 struct mm_struct *mm = current->mm;
1595 struct vm_area_struct *vma = NULL;
1596 unsigned long len;
1597 int ret;
1598 bool populate;
1599 LIST_HEAD(uf);
1600 VMA_ITERATOR(vmi, mm, addr);
1602 len = PAGE_ALIGN(request);
1603 if (len < request)
1604 return -ENOMEM;
1605 if (!len)
1606 return 0;
1608 /* Until we need other flags, refuse anything except VM_EXEC. */
1609 if ((flags & (~VM_EXEC)) != 0)
1610 return -EINVAL;
1612 if (mmap_write_lock_killable(mm))
1613 return -EINTR;
1615 ret = check_brk_limits(addr, len);
1616 if (ret)
1617 goto limits_failed;
1619 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
1620 if (ret)
1621 goto munmap_failed;
1623 vma = vma_prev(&vmi);
1624 ret = do_brk_flags(&vmi, vma, addr, len, flags);
1625 populate = ((mm->def_flags & VM_LOCKED) != 0);
1626 mmap_write_unlock(mm);
1627 userfaultfd_unmap_complete(mm, &uf);
1628 if (populate && !ret)
1629 mm_populate(addr, len);
1630 return ret;
1632 munmap_failed:
1633 limits_failed:
1634 mmap_write_unlock(mm);
1635 return ret;
1637 EXPORT_SYMBOL(vm_brk_flags);
1639 /* Release all mmaps. */
1640 void exit_mmap(struct mm_struct *mm)
1642 struct mmu_gather tlb;
1643 struct vm_area_struct *vma;
1644 unsigned long nr_accounted = 0;
1645 VMA_ITERATOR(vmi, mm, 0);
1646 int count = 0;
1648 /* mm's last user has gone, and it's about to be pulled down */
1649 mmu_notifier_release(mm);
1651 mmap_read_lock(mm);
1652 arch_exit_mmap(mm);
1654 vma = vma_next(&vmi);
1655 if (!vma || unlikely(xa_is_zero(vma))) {
1656 /* Can happen if dup_mmap() received an OOM */
1657 mmap_read_unlock(mm);
1658 mmap_write_lock(mm);
1659 goto destroy;
1662 lru_add_drain();
1663 flush_cache_mm(mm);
1664 tlb_gather_mmu_fullmm(&tlb, mm);
1665 /* update_hiwater_rss(mm) here? but nobody should be looking */
1666 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
1667 unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
1668 mmap_read_unlock(mm);
1671 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
1672 * because the memory has been already freed.
1674 set_bit(MMF_OOM_SKIP, &mm->flags);
1675 mmap_write_lock(mm);
1676 mt_clear_in_rcu(&mm->mm_mt);
1677 vma_iter_set(&vmi, vma->vm_end);
1678 free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
1679 USER_PGTABLES_CEILING, true);
1680 tlb_finish_mmu(&tlb);
1683 * Walk the list again, actually closing and freeing it, with preemption
1684 * enabled, without holding any MM locks besides the unreachable
1685 * mmap_write_lock.
1687 vma_iter_set(&vmi, vma->vm_end);
1688 do {
1689 if (vma->vm_flags & VM_ACCOUNT)
1690 nr_accounted += vma_pages(vma);
1691 remove_vma(vma, /* unreachable = */ true);
1692 count++;
1693 cond_resched();
1694 vma = vma_next(&vmi);
1695 } while (vma && likely(!xa_is_zero(vma)));
1697 BUG_ON(count != mm->map_count);
1699 trace_exit_mmap(mm);
1700 destroy:
1701 __mt_destroy(&mm->mm_mt);
1702 mmap_write_unlock(mm);
1703 vm_unacct_memory(nr_accounted);
1706 /* Insert vm structure into process list sorted by address
1707 * and into the inode's i_mmap tree. If vm_file is non-NULL
1708 * then i_mmap_rwsem is taken here.
1710 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
1712 unsigned long charged = vma_pages(vma);
1715 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
1716 return -ENOMEM;
1718 if ((vma->vm_flags & VM_ACCOUNT) &&
1719 security_vm_enough_memory_mm(mm, charged))
1720 return -ENOMEM;
1723 * The vm_pgoff of a purely anonymous vma should be irrelevant
1724 * until its first write fault, when page's anon_vma and index
1725 * are set. But now set the vm_pgoff it will almost certainly
1726 * end up with (unless mremap moves it elsewhere before that
1727 * first write fault), so /proc/pid/maps tells a consistent story.
1729 * By setting it to reflect the virtual start address of the
1730 * vma, merges and splits can happen in a seamless way, just
1731 * using the existing file pgoff checks and manipulations.
1732 * Similarly in do_mmap and in do_brk_flags.
1734 if (vma_is_anonymous(vma)) {
1735 BUG_ON(vma->anon_vma);
1736 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
1739 if (vma_link(mm, vma)) {
1740 if (vma->vm_flags & VM_ACCOUNT)
1741 vm_unacct_memory(charged);
1742 return -ENOMEM;
1745 return 0;
1749 * Return true if the calling process may expand its vm space by the passed
1750 * number of pages
1752 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
1754 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
1755 return false;
1757 if (is_data_mapping(flags) &&
1758 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
1759 /* Workaround for Valgrind */
1760 if (rlimit(RLIMIT_DATA) == 0 &&
1761 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
1762 return true;
1764 pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
1765 current->comm, current->pid,
1766 (mm->data_vm + npages) << PAGE_SHIFT,
1767 rlimit(RLIMIT_DATA),
1768 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
1770 if (!ignore_rlimit_data)
1771 return false;
1774 return true;
1777 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
1779 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
1781 if (is_exec_mapping(flags))
1782 mm->exec_vm += npages;
1783 else if (is_stack_mapping(flags))
1784 mm->stack_vm += npages;
1785 else if (is_data_mapping(flags))
1786 mm->data_vm += npages;
1789 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
1792 * Close hook, called for unmap() and on the old vma for mremap().
1794 * Having a close hook prevents vma merging regardless of flags.
1796 static void special_mapping_close(struct vm_area_struct *vma)
1798 const struct vm_special_mapping *sm = vma->vm_private_data;
1800 if (sm->close)
1801 sm->close(sm, vma);
1804 static const char *special_mapping_name(struct vm_area_struct *vma)
1806 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
1809 static int special_mapping_mremap(struct vm_area_struct *new_vma)
1811 struct vm_special_mapping *sm = new_vma->vm_private_data;
1813 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
1814 return -EFAULT;
1816 if (sm->mremap)
1817 return sm->mremap(sm, new_vma);
1819 return 0;
1822 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
1825 * Forbid splitting special mappings - kernel has expectations over
1826 * the number of pages in mapping. Together with VM_DONTEXPAND
1827 * the size of vma should stay the same over the special mapping's
1828 * lifetime.
1830 return -EINVAL;
1833 static const struct vm_operations_struct special_mapping_vmops = {
1834 .close = special_mapping_close,
1835 .fault = special_mapping_fault,
1836 .mremap = special_mapping_mremap,
1837 .name = special_mapping_name,
1838 /* vDSO code relies on VVAR not being accessible remotely */
1839 .access = NULL,
1840 .may_split = special_mapping_split,
1843 static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
1845 struct vm_area_struct *vma = vmf->vma;
1846 pgoff_t pgoff;
1847 struct page **pages;
1848 struct vm_special_mapping *sm = vma->vm_private_data;
1850 if (sm->fault)
1851 return sm->fault(sm, vmf->vma, vmf);
1853 pages = sm->pages;
1855 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
1856 pgoff--;
1858 if (*pages) {
1859 struct page *page = *pages;
1860 get_page(page);
1861 vmf->page = page;
1862 return 0;
1865 return VM_FAULT_SIGBUS;
1868 static struct vm_area_struct *__install_special_mapping(
1869 struct mm_struct *mm,
1870 unsigned long addr, unsigned long len,
1871 unsigned long vm_flags, void *priv,
1872 const struct vm_operations_struct *ops)
1874 int ret;
1875 struct vm_area_struct *vma;
1877 vma = vm_area_alloc(mm);
1878 if (unlikely(vma == NULL))
1879 return ERR_PTR(-ENOMEM);
1881 vma_set_range(vma, addr, addr + len, 0);
1882 vm_flags_init(vma, (vm_flags | mm->def_flags |
1883 VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
1884 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1886 vma->vm_ops = ops;
1887 vma->vm_private_data = priv;
1889 ret = insert_vm_struct(mm, vma);
1890 if (ret)
1891 goto out;
1893 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
1895 perf_event_mmap(vma);
1897 return vma;
1899 out:
1900 vm_area_free(vma);
1901 return ERR_PTR(ret);
1904 bool vma_is_special_mapping(const struct vm_area_struct *vma,
1905 const struct vm_special_mapping *sm)
1907 return vma->vm_private_data == sm &&
1908 vma->vm_ops == &special_mapping_vmops;
1912 * Called with mm->mmap_lock held for writing.
1913 * Insert a new vma covering the given region, with the given flags.
1914 * Its pages are supplied by the given array of struct page *.
1915 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
1916 * The region past the last page supplied will always produce SIGBUS.
1917 * The array pointer and the pages it points to are assumed to stay alive
1918 * for as long as this mapping might exist.
1920 struct vm_area_struct *_install_special_mapping(
1921 struct mm_struct *mm,
1922 unsigned long addr, unsigned long len,
1923 unsigned long vm_flags, const struct vm_special_mapping *spec)
1925 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
1926 &special_mapping_vmops);
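/*
 * Hedged usage sketch (vDSO-style; all names below are illustrative, not
 * taken from this file): an architecture typically keeps a static
 * descriptor and installs it with mmap_lock held for writing.
 *
 *	static struct page *example_pages[2];	// [1] stays NULL, terminating the array
 *
 *	static const struct vm_special_mapping example_mapping = {
 *		.name  = "[example]",
 *		.pages = example_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD,
 *				       &example_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */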
1930 * initialise the percpu counter for VM
1932 void __init mmap_init(void)
1934 int ret;
1936 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
1937 VM_BUG_ON(ret);
1941 * Initialise sysctl_user_reserve_kbytes.
1943 * This is intended to prevent a user from starting a single memory hogging
1944 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
1945 * mode.
1947 * The default value is min(3% of free memory, 128MB)
1948 * 128MB is enough to recover with sshd/login, bash, and top/kill.
1950 static int init_user_reserve(void)
1952 unsigned long free_kbytes;
1954 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1956 sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K);
1957 return 0;
1959 subsys_initcall(init_user_reserve);
1962 * Initialise sysctl_admin_reserve_kbytes.
1964 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1965 * to log in and kill a memory hogging process.
1967 * Systems with more than 256MB will reserve 8MB, enough to recover
1968 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1969 * only reserve 3% of free pages by default.
1971 static int init_admin_reserve(void)
1973 unsigned long free_kbytes;
1975 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1977 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K);
1978 return 0;
1980 subsys_initcall(init_admin_reserve);
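/*
 * Hedged admin-side sketch of tuning the reserves initialised above
 * (the values are just the default caps, 128MB and 8MB, expressed in
 * kbytes):
 *
 *	# sysctl vm.user_reserve_kbytes=131072
 *	# sysctl vm.admin_reserve_kbytes=8192
 */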
1983 * Reinitialise user and admin reserves if memory is added or removed.
1985 * The default user reserve max is 128MB, and the default max for the
1986 * admin reserve is 8MB. These are usually, but not always, enough to
1987 * enable recovery from a memory hogging process using login/sshd, a shell,
1988 * and tools like top. It may make sense to increase or even disable the
1989 * reserve depending on the existence of swap or variations in the recovery
1990 * tools. So, the admin may have changed them.
1992 * If memory is added and the reserves have been eliminated or increased above
1993 * the default max, then we'll trust the admin.
1995 * If memory is removed and there isn't enough free memory, then we
1996 * need to reset the reserves.
1998 * Otherwise keep the reserve set by the admin.
2000 static int reserve_mem_notifier(struct notifier_block *nb,
2001 unsigned long action, void *data)
2003 unsigned long tmp, free_kbytes;
2005 switch (action) {
2006 case MEM_ONLINE:
2007 /* Default max is 128MB. Leave alone if modified by operator. */
2008 tmp = sysctl_user_reserve_kbytes;
2009 if (tmp > 0 && tmp < SZ_128K)
2010 init_user_reserve();
2012 /* Default max is 8MB. Leave alone if modified by operator. */
2013 tmp = sysctl_admin_reserve_kbytes;
2014 if (tmp > 0 && tmp < SZ_8K)
2015 init_admin_reserve();
2017 break;
2018 case MEM_OFFLINE:
2019 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
2021 if (sysctl_user_reserve_kbytes > free_kbytes) {
2022 init_user_reserve();
2023 pr_info("vm.user_reserve_kbytes reset to %lu\n",
2024 sysctl_user_reserve_kbytes);
2027 if (sysctl_admin_reserve_kbytes > free_kbytes) {
2028 init_admin_reserve();
2029 pr_info("vm.admin_reserve_kbytes reset to %lu\n",
2030 sysctl_admin_reserve_kbytes);
2032 break;
2033 default:
2034 break;
2036 return NOTIFY_OK;
2039 static int __meminit init_reserve_notifier(void)
2041 if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
2042 pr_err("Failed registering memory add/remove notifier for admin reserve\n");
2044 return 0;
2046 subsys_initcall(init_reserve_notifier);
2049 * Relocate a VMA downwards by shift bytes. There cannot be any VMAs between
2050 * this VMA and its relocated range, which will now reside at [vma->vm_start -
2051 * shift, vma->vm_end - shift).
2053 * This function is almost certainly NOT what you want for anything other than
2054 * early executable temporary stack relocation.
2056 int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
2059 * The process proceeds as follows:
2061 * 1) Use shift to calculate the new vma endpoints.
2062 * 2) Extend vma to cover both the old and new ranges. This ensures the
2063 * arguments passed to subsequent functions are consistent.
2064 * 3) Move vma's page tables to the new range.
2065 * 4) Free up any cleared pgd range.
2066 * 5) Shrink the vma to cover only the new range.
2069 struct mm_struct *mm = vma->vm_mm;
2070 unsigned long old_start = vma->vm_start;
2071 unsigned long old_end = vma->vm_end;
2072 unsigned long length = old_end - old_start;
2073 unsigned long new_start = old_start - shift;
2074 unsigned long new_end = old_end - shift;
2075 VMA_ITERATOR(vmi, mm, new_start);
2076 VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff);
2077 struct vm_area_struct *next;
2078 struct mmu_gather tlb;
2080 BUG_ON(new_start > new_end);
2083 * ensure there are no vmas between where we want to go
2084 * and where we are
2086 if (vma != vma_next(&vmi))
2087 return -EFAULT;
2089 vma_iter_prev_range(&vmi);
2091 * cover the whole range: [new_start, old_end)
2093 vmg.vma = vma;
2094 if (vma_expand(&vmg))
2095 return -ENOMEM;
2098 * move the page tables downwards, on failure we rely on
2099 * process cleanup to remove whatever mess we made.
2101 if (length != move_page_tables(vma, old_start,
2102 vma, new_start, length, false, true))
2103 return -ENOMEM;
2105 lru_add_drain();
2106 tlb_gather_mmu(&tlb, mm);
2107 next = vma_next(&vmi);
2108 if (new_end > old_start) {
2110 * when the old and new regions overlap clear from new_end.
2112 free_pgd_range(&tlb, new_end, old_end, new_end,
2113 next ? next->vm_start : USER_PGTABLES_CEILING);
2114 } else {
2116 * otherwise, clean from old_start; this is done to not touch
2117 * the address space in [new_end, old_start); some architectures
2118 * have constraints on va-space that make this illegal (IA64) -
2119 * for the others it's just a little faster.
2121 free_pgd_range(&tlb, old_start, old_end, new_end,
2122 next ? next->vm_start : USER_PGTABLES_CEILING);
2124 tlb_finish_mmu(&tlb);
2126 vma_prev(&vmi);
2127 /* Shrink the vma to just the new range */
2128 return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);