// SPDX-License-Identifier: GPL-2.0-only

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <linux/uaccess.h>

#include "internal.h"
/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);
/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 *
 * Return: source string if it is in .rodata section, otherwise fall back
 * to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
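
/*
 * Example (illustrative sketch, not taken from an in-tree caller): a
 * hypothetical driver keeping a possibly-static name; "tmpl" is a made-up
 * structure. The point is the kstrdup_const()/kfree_const() pairing,
 * which avoids a copy when the source lives in .rodata:
 *
 *	const char *name = kstrdup_const(tmpl->name, GFP_KERNEL);
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(name);
 */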
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
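
/*
 * Example (illustrative sketch): duplicating a fixed-size blob, e.g. a
 * hypothetical default configuration table "default_cfg":
 *
 *	struct cfg *cfg = kmemdup(&default_cfg, sizeof(default_cfg),
 *				  GFP_KERNEL);
 *	if (!cfg)
 *		return -ENOMEM;
 */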
/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
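
/*
 * Example (illustrative sketch): a hypothetical ioctl handler copying a
 * user buffer. Note the ERR_PTR() convention: failure is reported via
 * IS_ERR(), not NULL, and the result is released with kfree():
 *
 *	void *buf = memdup_user(uarg, len);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */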
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);
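
/*
 * Example (illustrative sketch): same ERR_PTR() convention as
 * memdup_user(), but the buffer may be vmalloc-backed for large sizes,
 * so it must be released with kvfree(). "nr" and "tbl" are placeholders:
 *
 *	struct entry *tbl = vmemdup_user(uptr, array_size(nr, sizeof(*tbl)));
 *	if (IS_ERR(tbl))
 *		return PTR_ERR(tbl);
 *	...
 *	kvfree(tbl);
 */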
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
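
/*
 * Example (illustrative sketch): copying a bounded, NUL-terminated name
 * from user space; the bound here is arbitrary and the result is
 * kfree()d like any memdup_user() allocation:
 *
 *	char *name = strndup_user(uname, PAGE_SIZE);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	...
 *	kfree(name);
 */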
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
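
/*
 * Example (illustrative sketch): a hypothetical procfs write handler in
 * which @count bytes of user data become a NUL-terminated string;
 * my_parse() is a made-up helper:
 *
 *	char *kbuf = memdup_user_nul(ubuf, count);
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	ret = my_parse(kbuf);
 *	kfree(kbuf);
 */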
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif
/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_sem is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	lockdep_assert_held_write(&mm->mmap_sem);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);
/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	down_write(&mm->mmap_sem);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	up_write(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
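
/*
 * Example (illustrative sketch): a hypothetical driver pinning "npages"
 * of user memory charges them before pinning and uncharges on teardown;
 * a NULL mm (e.g. a kernel thread) makes the call a no-op:
 *
 *	ret = account_locked_vm(current->mm, npages, true);
 *	if (ret)
 *		return ret;
 *	...
 *	account_locked_vm(current->mm, npages, false);
 */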
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}
unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
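
/*
 * Example (illustrative sketch): an in-kernel file mapping, roughly as a
 * binary loader might create one; all values are placeholders. Errors
 * come back encoded in the returned address, hence IS_ERR_VALUE():
 *
 *	addr = vm_mmap(file, 0, size, PROT_READ | PROT_EXEC,
 *		       MAP_PRIVATE, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (long)addr;
 */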
/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
 * fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
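
/*
 * Example (illustrative sketch): a size-driven allocation where physical
 * contiguity is not required. The caller cannot know which allocator
 * satisfied the request, so the buffer must be freed with kvfree();
 * "nr" and "tbl" are placeholders:
 *
 *	struct entry *tbl = kvmalloc_node(array_size(nr, sizeof(*tbl)),
 *					  GFP_KERNEL, NUMA_NO_NODE);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */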
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}
/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}
/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < (1 << compound_order(page)); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);
struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}
struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);
/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}
/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}
int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}
/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}