mm/util.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 *
 * Return: source string if it is in .rodata section otherwise
 * fallback to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
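
/*
 * A minimal usage sketch, not part of this file: pairing kstrdup_const()
 * with kfree_const() lets callers keep string-literal names without a heap
 * copy. The struct and function names below are hypothetical, for
 * illustration only.
 */
#if 0
struct widget {
	const char *name;
};

static int widget_set_name(struct widget *w, const char *name)
{
	const char *copy;

	/* No allocation happens if @name lives in .rodata. */
	copy = kstrdup_const(name, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;

	kfree_const(w->name);	/* safe for both heap and .rodata strings */
	w->name = copy;
	return 0;
}
#endif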

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
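
/*
 * A minimal usage sketch, not part of this file: kmemdup() is the usual
 * way to take a private copy of a caller-provided template structure.
 * The struct and function names are hypothetical.
 */
#if 0
struct example_cfg {
	u32 flags;
	u32 timeout_ms;
};

static struct example_cfg *example_cfg_clone(const struct example_cfg *tmpl)
{
	/* Returns NULL (not an ERR_PTR) on allocation failure. */
	return kmemdup(tmpl, sizeof(*tmpl), GFP_KERNEL);
}
#endif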

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
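
/*
 * A minimal usage sketch (hypothetical ioctl helper, not part of this
 * file): memdup_user() reports failure with ERR_PTR(), so callers must
 * test with IS_ERR()/PTR_ERR() rather than a NULL check.
 */
#if 0
static long example_ioctl_copy(void __user *uarg, size_t len)
{
	void *buf;

	buf = memdup_user(uarg, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... operate on the kernel copy ... */

	kfree(buf);
	return 0;
}
#endif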

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may be not
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
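
/*
 * A minimal usage sketch (hypothetical procfs write handler, not part of
 * this file): memdup_user_nul() is the usual way to turn a write(2)
 * buffer into a C string before parsing it.
 */
#if 0
static ssize_t example_proc_write(struct file *file, const char __user *ubuf,
				  size_t count, loff_t *ppos)
{
	char *kbuf;
	unsigned long val;
	int ret;

	kbuf = memdup_user_nul(ubuf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	ret = kstrtoul(kbuf, 0, &val);	/* safe: kbuf is NUL-terminated */
	kfree(kbuf);
	return ret ? ret : count;
}
#endif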

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_sem is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	lockdep_assert_held_write(&mm->mmap_sem);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	down_write(&mm->mmap_sem);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	up_write(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
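
/*
 * A minimal usage sketch (hypothetical driver, not part of this file):
 * charge pages before pinning them, and undo the charge with inc=false
 * on the error path or at teardown. example_do_pin() is a stand-in for
 * whatever pinning the driver actually performs.
 */
#if 0
static int example_pin_pages(struct mm_struct *mm, unsigned long npages)
{
	int ret;

	ret = account_locked_vm(mm, npages, true);
	if (ret)
		return ret;	/* would exceed RLIMIT_MEMLOCK */

	if (example_do_pin() < 0) {
		/* Pinning failed: release the accounting we took above. */
		account_locked_vm(mm, npages, false);
		return -EFAULT;
	}
	return 0;
}
#endif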

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
 * fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
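
/*
 * A minimal usage sketch, not part of this file: kvmalloc_array() is an
 * overflow-checking wrapper that ends up in kvmalloc_node(). Whatever
 * path the allocation took, kvfree() releases it correctly. The function
 * name and array size are purely illustrative.
 */
#if 0
static int example_alloc_table(size_t nr_entries)
{
	u64 *table;

	/* May be kmalloc-backed (contiguous) or vmalloc-backed. */
	table = kvmalloc_array(nr_entries, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* ... use table; do NOT hand it to DMA, it may not be contiguous ... */

	kvfree(table);
	return 0;
}
#endif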

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);
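
/*
 * A minimal usage sketch (hypothetical crypto teardown, not part of this
 * file): zeroing before free prevents key material from lingering in
 * freed memory; a plain memset() could be elided by the compiler.
 */
#if 0
struct example_ctx {
	u8 *key;
	size_t keylen;
};

static void example_ctx_destroy(struct example_ctx *ctx)
{
	kvfree_sensitive(ctx->key, ctx->keylen);	/* clears, then frees */
	ctx->key = NULL;
}
#endif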

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < compound_nr(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as is in its own cacheline, not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be used as a metric
 * to drive ballooning decisions when Linux is hosted as a guest. On
 * Hyper-V, the host implements a policy engine for dynamically balancing
 * memory across competing virtual machines that are hosted. Several
 * metrics drive this policy engine including the guest reported memory
 * commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}