#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Function returns the source string if it is in .rodata section, otherwise
 * it falls back to kstrdup.
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
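
/*
 * Illustrative sketch, not part of the original file: the intended pairing
 * of kstrdup_const() and kfree_const() for names that are usually string
 * literals living in .rodata. The structure and helpers below are
 * hypothetical and only show the calling convention.
 */
#if 0
struct example_attr {
	const char *name;
};

static int example_attr_set_name(struct example_attr *attr, const char *name)
{
	/* A literal in .rodata is returned as-is; anything else is kstrdup'ed. */
	attr->name = kstrdup_const(name, GFP_KERNEL);
	return attr->name ? 0 : -ENOMEM;
}

static void example_attr_free(struct example_attr *attr)
{
	/* Frees only if the pointer does not point into .rodata. */
	kfree_const(attr->name);
}
#endif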

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
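
/*
 * Illustrative sketch, not part of the original file: kmemdup_nul() is the
 * natural way to turn a counted, unterminated field (for example one parsed
 * out of a binary buffer) into a C string. The helper name is hypothetical.
 */
#if 0
static char *example_field_to_string(const char *field, size_t field_len)
{
	/* Copies field_len bytes and appends the terminating NUL. */
	return kmemdup_nul(field, field_len, GFP_KERNEL);
}
#endif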

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
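
/*
 * Illustrative sketch, not part of the original file: copying a small,
 * fixed-size argument structure from user space in an ioctl-style handler.
 * The structure and handler names are hypothetical.
 */
#if 0
struct example_req {
	u32 flags;
	u64 addr;
};

static long example_ioctl_copy(void __user *argp)
{
	struct example_req *req;

	req = memdup_user(argp, sizeof(*req));
	if (IS_ERR(req))
		return PTR_ERR(req);	/* -ENOMEM or -EFAULT */

	/* ... act on req->flags and req->addr ... */

	kfree(req);
	return 0;
}
#endif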

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
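
/*
 * Illustrative sketch, not part of the original file: duplicating a
 * NUL-terminated string (such as a path) handed in from user space,
 * bounded by PATH_MAX. The function name is hypothetical.
 */
#if 0
static int example_set_path(const char __user *upath)
{
	char *path;

	path = strndup_user(upath, PATH_MAX);
	if (IS_ERR(path))
		return PTR_ERR(path);	/* -EFAULT or -EINVAL */

	/* ... use path ... */

	kfree(path);
	return 0;
}
#endif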

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
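
/*
 * Illustrative sketch, not part of the original file: pinning a user buffer
 * for a short-lived operation and dropping the page references afterwards.
 * Names are hypothetical and error handling is kept minimal.
 */
#if 0
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	int i, pinned;

	/* write == 1: the pinned pages may be written to. */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (pinned < 0)
		return pinned;

	/* ... operate on pages[0..pinned-1] ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	return pinned;
}
#endif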

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
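
/*
 * Illustrative sketch, not part of the original file: the allocation pattern
 * kvfree() exists for - try kmalloc() first and fall back to vmalloc() for
 * large sizes, so the caller can free either result with one call. The
 * helper name is hypothetical.
 */
#if 0
static void *example_alloc_large(size_t size)
{
	void *p;

	p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!p)
		p = vmalloc(size);
	return p;	/* free with kvfree() in either case */
}
#endif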

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < (1 << compound_order(page)); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
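
/*
 * Worked example, not from the original file: with overcommit_kbytes == 0,
 * 4 GiB of RAM, no hugetlb pages, overcommit_ratio == 50 and 2 GiB of swap,
 * the limit is (4 GiB * 50 / 100) + 2 GiB = 4 GiB worth of pages.
 */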

/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure. The dentry
		 * cache and most inode caches should fall into this.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);