// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <kunit/visibility.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * The function calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * __kmemdup_nul - Create a NUL-terminated string from @s, which might be unterminated.
 * @s: The data to copy
 * @len: The size of the data, not including the NUL terminator
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
static __always_inline char *__kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	/* '+1' for the NUL terminator */
	buf = kmalloc_track_caller(len + 1, gfp);
	if (!buf)
		return NULL;

	memcpy(buf, s, len);
	/* Ensure the buf is always NUL-terminated, regardless of @s. */
	buf[len] = '\0';
	return buf;
}

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, strlen(s), gfp) : NULL;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in the .rodata section, otherwise fall
 * back to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

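/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller pairs kstrdup_const() with kfree_const() so that string literals
 * are neither copied nor freed. The "my_dev" structure and its "name"
 * field below are hypothetical.
 *
 *	struct my_dev {
 *		const char *name;
 *	};
 *
 *	static int my_dev_set_name(struct my_dev *dev, const char *name)
 *	{
 *		dev->name = kstrdup_const(name, GFP_KERNEL);
 *		return dev->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void my_dev_release(struct my_dev *dev)
 *	{
 *		kfree_const(dev->name);
 *	}
 */
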
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, strnlen(s, max), gfp) : NULL;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup_noprof);

/**
 * kmemdup_array - duplicate a given array.
 *
 * @src: array to duplicate.
 * @count: number of elements to duplicate from array.
 * @element_size: size of each element of array.
 * @gfp: GFP mask to use.
 *
 * Return: duplicated array of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
	return kmemdup(src, size_mul(element_size, count), gfp);
}
EXPORT_SYMBOL(kmemdup_array);

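/*
 * Example (hypothetical): duplicating an array of elements. size_mul()
 * saturates instead of wrapping, so an absurd element count fails the
 * allocation rather than silently overflowing. "struct point", "orig"
 * and "n" are assumptions for illustration.
 *
 *	struct point { int x, y; };
 *	struct point *copy;
 *
 *	copy = kmemdup_array(orig, n, sizeof(*orig), GFP_KERNEL);
 *	if (!copy)
 *		return -ENOMEM;
 */
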
/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result may not be physically contiguous. Use kvfree() to free.
 */
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kvmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kvmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, len, gfp) : NULL;
}
EXPORT_SYMBOL(kmemdup_nul);

static kmem_buckets *user_buckets __ro_after_init;

static int __init init_user_buckets(void)
{
	user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);

	return 0;
}
subsys_initcall(init_user_buckets);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

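/*
 * Example (hypothetical ioctl-style caller): memdup_user() returns an
 * ERR_PTR(), never NULL, so the result is checked with IS_ERR(), not
 * against NULL. "uarg" and "len" are assumptions for illustration.
 *
 *	void *kbuf = memdup_user(uarg, len);
 *
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	...
 *	kfree(kbuf);
 */
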
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

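/*
 * Example (hypothetical): copying a NUL-terminated name from userspace,
 * bounded at some maximum such as PAGE_SIZE. "uname" is an assumption
 * for illustration.
 *
 *	char *name = strndup_user(uname, PAGE_SIZE);
 *
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	...
 *	kfree(name);
 */
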
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

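/*
 * Example (hypothetical write handler): memdup_user_nul() is the usual
 * choice for procfs/sysfs-style writes, where the copied buffer is then
 * parsed as a string. "my_write" is an assumption for illustration.
 *
 *	static ssize_t my_write(struct file *file, const char __user *ubuf,
 *				size_t count, loff_t *ppos)
 *	{
 *		char *kbuf = memdup_user_nul(ubuf, count);
 *
 *		if (IS_ERR(kbuf))
 *			return PTR_ERR(kbuf);
 *		...
 *		kfree(kbuf);
 *		return count;
 *	}
 */
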
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

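/*
 * Worked example (assuming 4KB pages, so PAGE_SHIFT == 12): the default
 * STACK_RND_MASK is 0x7ff, i.e. up to 0x7ff pages of randomness, which is
 * 0x7ff << 12 bytes (just under 8MB of VA) subtracted from (or, with
 * CONFIG_STACK_GROWSUP, added to) the page-aligned stack top.
 */
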
/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned. We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range). On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32-bit? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* On parisc the stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout. */
	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
	    !IS_ENABLED(CONFIG_STACK_GROWSUP))
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
	/*
	 * For an upwards growing stack the calculation is much simpler.
	 * Memory for the maximum stack size is reserved at the top of the
	 * task. mmap_base starts directly below the stack and grows
	 * downwards.
	 */
	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	clear_bit(MMF_TOPDOWN, &mm->flags);
}
#endif
#ifdef CONFIG_MMU
EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

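/*
 * Example (hypothetical): a driver that pins user pages typically charges
 * them against RLIMIT_MEMLOCK before pinning and uncharges on teardown.
 * "npages" and "ret" are assumptions for illustration.
 *
 *	ret = account_locked_vm(current->mm, npages, true);
 *	if (ret)
 *		return ret;
 *	...
 *	account_locked_vm(current->mm, npages, false);
 */
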
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
{
	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long term fragmentation than the vmalloc
	 * fallback. However, make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings, as
	 * we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		flags |= __GFP_NOWARN;

		if (!(flags & __GFP_RETRY_MAYFAIL))
			flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		flags &= ~__GFP_NOFAIL;
	}

	return flags;
}

/**
 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @b: which set of kmalloc buckets to allocate from.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
{
	void *ret;

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub page
	 * requests
	 */
	ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b),
				    kmalloc_gfp_adjust(flags, size),
				    node);
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kvmalloc_node_noprof);

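/*
 * Example (hypothetical): a caller that only needs the buffer to be
 * virtually contiguous uses kvmalloc() or kvmalloc_array() and must free
 * with kvfree(), since it cannot know which allocator satisfied the
 * request. "struct entry" and "nr_entries" are assumptions.
 *
 *	struct entry *table;
 *
 *	table = kvmalloc_array(nr_entries, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);
 */
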
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

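/*
 * Example (hypothetical): freeing key material. memzero_explicit() ensures
 * the clearing is not optimized away before the memory is released.
 * "key" and "key_len" are assumptions for illustration.
 *
 *	u8 *key = kvmalloc(key_len, GFP_KERNEL);
 *	...
 *	kvfree_sensitive(key, key_len);
 */
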
/**
 * kvrealloc - reallocate memory; contents remain unchanged
 * @p: object to reallocate memory for
 * @size: the size to reallocate
 * @flags: the flags for the page level allocator
 *
 * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
 * and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 *
 * This function must not be called concurrently with itself or kvfree() for the
 * same memory allocation.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
{
	void *n;

	if (is_vmalloc_addr(p))
		return vrealloc_noprof(p, size, flags);

	n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
	if (!n) {
		/* We failed to krealloc(), fall back to kvmalloc(). */
		n = kvmalloc_noprof(size, flags);
		if (!n)
			return NULL;

		if (p) {
			/* We already know that `p` is not a vmalloc address. */
			kasan_disable_current();
			memcpy(n, kasan_reset_tag(p), ksize(p));
			kasan_enable_current();

			kfree(p);
		}
	}

	return n;
}
EXPORT_SYMBOL(kvrealloc_noprof);

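/*
 * Example (hypothetical): growing a kvmalloc'ed buffer. As with krealloc(),
 * the old pointer must not be overwritten until the call is known to have
 * succeeded. "buf" and "new_size" are assumptions for illustration.
 *
 *	void *new = kvrealloc(buf, new_size, GFP_KERNEL);
 *
 *	if (!new)
 *		return -ENOMEM;
 *	buf = new;
 */
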
/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);

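/*
 * Example (hypothetical): the array and calloc variants exist so callers
 * never open-code an "n * size" multiplication that might overflow.
 * "struct row" and "nrows" are assumptions for illustration.
 *
 *	struct row *rows = vcalloc(nrows, sizeof(*rows));
 *
 *	if (!rows)
 *		return -ENOMEM;
 *	...
 *	vfree(rows);
 */
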
struct anon_vma *folio_anon_vma(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio->swap);

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}
EXPORT_SYMBOL(folio_copy);

int folio_mc_copy(struct folio *dst, struct folio *src)
{
	long nr = folio_nr_pages(src);
	long i = 0;

	for (;;) {
		if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
			return -EHWPOISON;
		if (++i == nr)
			break;
		cond_resched();
	}

	return 0;
}
EXPORT_SYMBOL(folio_mc_copy);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though users usually won't switch to OVERCOMMIT_NEVER
	 * frequently), the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

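/*
 * Worked example (assumed numbers): with sysctl_overcommit_kbytes unset,
 * 16GB of RAM, no hugetlb pages, the default overcommit_ratio of 50 and
 * 8GB of swap, the commit limit is 16GB * 50% + 8GB = 16GB worth of pages.
 */
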
/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;
	unsigned long bytes_failed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;

error:
	bytes_failed = pages << PAGE_SHIFT;
	pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
			    __func__, current->pid, current->comm, bytes_failed);
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len, FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_local_page(page1);
	addr2 = kmap_local_page(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_local(addr2);
	kunmap_local(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_dump_obj(object))
		return;

	if (vmalloc_dump_obj(object))
		return;

	if (is_vmalloc_addr(object))
		type = "vmalloc memory";
	else if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif

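/*
 * Example (hypothetical): since mem_dump_obj() only continues a line via
 * pr_cont(), callers print their own preamble first, without a trailing
 * newline. "obj" is an assumption for illustration.
 *
 *	pr_err("unexpected object %px", obj);
 *	mem_dump_obj(obj);
 */
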
/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif