#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"
/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);
/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);
/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 *
 * Return: source string if it is in .rodata section, otherwise
 * fall back to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
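/*
 * Usage sketch (hypothetical caller, not compiled): kstrdup_const() pairs
 * with kfree_const(), which is safe for both .rodata and heap strings.
 */
#if 0
static int example_set_name(const char **namep, const char *name)
{
        const char *copy = kstrdup_const(name, GFP_KERNEL);

        if (!copy)
                return -ENOMEM;
        kfree_const(*namep);    /* no-op for .rodata, kfree() otherwise */
        *namep = copy;
        return 0;
}
#endif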
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len+1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);
/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
        char *buf;

        if (!s)
                return NULL;

        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
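/*
 * Usage sketch (hypothetical caller, not compiled): when the length is
 * already known, kmemdup_nul() avoids the strnlen() walk that kstrndup()
 * performs on the source.
 */
#if 0
static char *example_label_from_blob(const void *blob, size_t blob_len)
{
        /* blob need not be NUL-terminated; the copy always is */
        return kmemdup_nul(blob, blob_len, GFP_KERNEL);
}
#endif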
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);
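/*
 * Usage sketch (hypothetical ioctl path, not compiled): memdup_user()
 * returns an ERR_PTR(), never NULL, so callers test with IS_ERR().
 */
#if 0
static long example_copy_args(const void __user *uarg, size_t len)
{
        void *p = memdup_user(uarg, len);

        if (IS_ERR(p))
                return PTR_ERR(p);
        /* ... validate and consume the copy ... */
        kfree(p);               /* physically contiguous: plain kfree() */
        return 0;
}
#endif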
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may be not
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kvmalloc(len, GFP_USER);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kvfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(vmemdup_user);
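/*
 * Usage sketch (hypothetical caller, not compiled): vmemdup_user() may
 * hand back vmalloc()ed memory, so the copy must be released with
 * kvfree(), never plain kfree().
 */
#if 0
static int example_load_table(const void __user *uptr, size_t len)
{
        void *table = vmemdup_user(uptr, len);

        if (IS_ERR(table))
                return PTR_ERR(table);
        /* ... consume table ... */
        kvfree(table);          /* handles both kmalloc and vmalloc results */
        return 0;
}
#endif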
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause pagefault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);
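/*
 * Usage sketch (hypothetical write handler, not compiled): memdup_user_nul()
 * is the usual way to turn a user buffer of @count bytes into a
 * NUL-terminated kernel string before parsing it.
 */
#if 0
static ssize_t example_write(struct file *file, const char __user *ubuf,
                             size_t count, loff_t *ppos)
{
        char *kbuf = memdup_user_nul(ubuf, count);

        if (IS_ERR(kbuf))
                return PTR_ERR(kbuf);
        /* ... parse kbuf, e.g. with kstrtoul() ... */
        kfree(kbuf);
        return count;
}
#endif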
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
        struct task_struct * __maybe_unused t = current;

        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif
/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * Note a difference with get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
                                 int nr_pages, int write, struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 *
 * Return: number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int __weak get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
{
        return get_user_pages_unlocked(start, nr_pages, pages,
                                       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
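/*
 * Usage sketch (hypothetical caller, not compiled): a positive return may
 * cover fewer pages than requested, and each pinned page must eventually
 * be released with put_page().
 */
#if 0
static int example_pin_range(unsigned long start, int nr_pages,
                             struct page **pages)
{
        int i, pinned;

        pinned = get_user_pages_fast(start, nr_pages, 1, pages);
        if (pinned < 0)
                return pinned;  /* -errno: nothing was pinned */

        /* ... operate on pages[0..pinned-1] ... */

        for (i = 0; i < pinned; i++)
                put_page(pages[i]);
        return pinned;
}
#endif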
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;
        LIST_HEAD(uf);

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                if (down_write_killable(&mm->mmap_sem))
                        return -EINTR;
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate, &uf);
                up_write(&mm->mmap_sem);
                userfaultfd_unmap_complete(mm, &uf);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}
unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
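/*
 * Usage sketch (hypothetical caller, not compiled): vm_mmap() encodes
 * failure as a negative errno in the returned address, so in-kernel
 * callers check it with IS_ERR_VALUE().
 */
#if 0
static unsigned long example_map_file(struct file *file, unsigned long size)
{
        unsigned long addr = vm_mmap(file, 0, size, PROT_READ, MAP_SHARED, 0);

        if (IS_ERR_VALUE(addr))
                pr_debug("vm_mmap failed: %ld\n", (long)addr);
        return addr;
}
#endif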
/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
 * fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
        gfp_t kmalloc_flags = flags;
        void *ret;

        /*
         * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
         * so the given set of flags has to be compatible.
         */
        if ((flags & GFP_KERNEL) != GFP_KERNEL)
                return kmalloc_node(size, flags, node);

        /*
         * We want to attempt a large physically contiguous block first because
         * it is less likely to fragment multiple larger blocks and therefore
         * contribute to a long term fragmentation less than vmalloc fallback.
         * However make sure that larger requests are not too disruptive - no
         * OOM killer and no allocation failure warnings as we have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;

                if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
                        kmalloc_flags |= __GFP_NORETRY;
        }

        ret = kmalloc_node(size, kmalloc_flags, node);

        /*
         * It doesn't really make sense to fallback to vmalloc for sub page
         * requests
         */
        if (ret || size <= PAGE_SIZE)
                return ret;

        return __vmalloc_node_flags_caller(size, node, flags,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
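/*
 * Usage sketch (hypothetical caller, not compiled): the kvmalloc() family
 * tries a physically contiguous kmalloc first and quietly falls back to
 * vmalloc; either way the result is released with kvfree().
 */
#if 0
static void *example_alloc_big_table(size_t nr, size_t size)
{
        /* may come from kmalloc or vmalloc; kvfree() frees both */
        return kvmalloc_array(nr, size, GFP_KERNEL | __GFP_ZERO);
}
#endif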
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);
static inline void *__page_rmapping(struct page *page)
{
        unsigned long mapping;

        mapping = (unsigned long)page->mapping;
        mapping &= ~PAGE_MAPPING_FLAGS;

        return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        page = compound_head(page);
        return __page_rmapping(page);
}
/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
        int i;

        if (likely(!PageCompound(page)))
                return atomic_read(&page->_mapcount) >= 0;
        page = compound_head(page);
        if (atomic_read(compound_mapcount_ptr(page)) >= 0)
                return true;
        if (PageHuge(page))
                return false;
        for (i = 0; i < (1 << compound_order(page)); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL(page_mapped);
struct anon_vma *page_anon_vma(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)page->mapping;
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return __page_rmapping(page);
}
struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping;

        page = compound_head(page);

        /* This happens if someone calls flush_dcache_page on slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                return swap_address_space(entry);
        }

        mapping = page->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_ANON)
                return NULL;

        return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);
/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
        if (unlikely(PageSwapCache(page)))
                return NULL;
        return page_mapping(page);
}
/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
        int ret;

        ret = atomic_read(&page->_mapcount) + 1;
        /*
         * For file THP page->_mapcount contains total number of mapping
         * of the page: no need to look into compound_mapcount.
         */
        if (!PageAnon(page) && !PageHuge(page))
                return ret;
        page = compound_head(page);
        ret += atomic_read(compound_mapcount_ptr(page)) + 1;
        if (PageDoubleMap(page))
                ret--;
        return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
int overcommit_ratio_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}
int overcommit_kbytes_handler(struct ctl_table *table, int write,
                              void __user *buffer, size_t *lenp,
                              loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}
/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages() - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}
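/*
 * Worked example with assumed numbers: on a machine with 4 GiB of RAM
 * (1048576 4K pages, no hugetlb), overcommit_ratio = 50 and 1 GiB of swap
 * (262144 pages), the OVERCOMMIT_NEVER limit is
 * 1048576 * 50 / 100 + 262144 = 786432 pages, i.e. 3 GiB of commit space.
 */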
/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
        return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        long free, allowed, reserve;

        VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
                        -(s64)vm_committed_as_batch * num_online_cpus(),
                        "memory commitment underflow");

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                free = global_zone_page_state(NR_FREE_PAGES);
                free += global_node_page_state(NR_FILE_PAGES);

                /*
                 * shmem pages shouldn't be counted as free in this
                 * case, they can't be purged, only swapped out, and
                 * that won't affect the overall amount of available
                 * memory in the system.
                 */
                free -= global_node_page_state(NR_SHMEM);

                free += get_nr_swap_pages();

                /*
                 * Any slabs which are created with the
                 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
                 * which are reclaimable, under pressure.  The dentry
                 * cache and most inode caches should fall into this
                 * category.
                 */
                free += global_node_page_state(NR_SLAB_RECLAIMABLE);

                /*
                 * Part of the kernel memory, which can be released
                 * under memory pressure.
                 */
                free += global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);

                /*
                 * Leave reserved pages. The pages are not for anonymous pages.
                 */
                if (free <= totalreserve_pages)
                        goto error;
                else
                        free -= totalreserve_pages;

                /*
                 * Reserve some for root
                 */
                if (!cap_sys_admin)
                        free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

                if (free > pages)
                        return 0;

                goto error;
        }

        allowed = vm_commit_limit();
        /*
         * Reserve some for root
         */
        if (!cap_sys_admin)
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

        /*
         * Don't let a single process grow so big a user can't recover
         */
        if (mm) {
                reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
                allowed -= min_t(long, mm->total_vm / 32, reserve);
        }

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;
        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        down_read(&mm->mmap_sem);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        up_read(&mm->mmap_sem);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

        /*
         * If the nul at the end of args has been overwritten, then
         * assume application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len,
                                                 FOLL_FORCE);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}