#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>
/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
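
/*
 * A minimal usage sketch (not part of the original file; the helper
 * name is hypothetical): duplicate a caller-owned string before
 * storing it.
 */
static __maybe_unused char *example_save_label(const char *label)
{
	/* GFP_KERNEL may sleep; use a stricter mask from atomic context. */
	return kstrdup(label, GFP_KERNEL);
}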
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
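
/*
 * Hypothetical caller sketch: kmemdup() is the usual way to take a
 * private copy of a fixed-size blob. It returns NULL on allocation
 * failure, following kmalloc() conventions rather than ERR_PTR().
 */
static __maybe_unused void *example_copy_blob(const void *blob, size_t size)
{
	return kmemdup(blob, size, GFP_KERNEL);
}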
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
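
/*
 * Usage sketch (hypothetical helper, not part of the original file):
 * unlike kmemdup(), memdup_user() reports failure with ERR_PTR(), so
 * callers must test IS_ERR() rather than comparing against NULL.
 */
static __maybe_unused int example_copy_from_user(const void __user *ubuf,
						 size_t len)
{
	void *kbuf = memdup_user(ubuf, len);

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);	/* -ENOMEM or -EFAULT */
	/* ... operate on kbuf ... */
	kfree(kbuf);
	return 0;
}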
static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);
	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);
	return ret;
}
/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);
/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
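
/*
 * Growth-pattern sketch (hypothetical helper): on failure krealloc()
 * returns NULL and leaves the old buffer intact, so the original
 * pointer must not be overwritten until the call succeeds.
 */
static __maybe_unused int example_grow_buffer(void **bufp, size_t new_size)
{
	void *tmp = krealloc(*bufp, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *bufp still valid and unchanged */
	*bufp = tmp;
	return 0;
}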
/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer, which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance-sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
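
/*
 * Sketch (hypothetical helper name): zero-on-free suits key material;
 * note the memset() above covers the whole ksize() allocation, not
 * just the bytes originally requested.
 */
static __maybe_unused void example_destroy_key(char *key)
{
	kzfree(key);	/* safe on NULL, like kfree() */
}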
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
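
/*
 * Caller sketch (hypothetical helper; assumes PATH_MAX is visible in
 * this translation unit): a bounded copy of a user-supplied path.
 * The result is ERR_PTR-encoded, mirroring memdup_user().
 */
static __maybe_unused char *example_get_user_path(const char __user *upath)
{
	return strndup_user(upath, PATH_MAX);
}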
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}
/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
/*
 * Check if the vma is being used as a stack.
 * If @in_group is non-zero, check in the entire thread group or else
 * just check in the current task. Returns the pid of the task that
 * the vma is stack for.
 */
pid_t vm_is_stack(struct task_struct *task,
		  struct vm_area_struct *vma, int in_group)
{
	pid_t ret = 0;

	if (vm_is_stack_for_task(task, vma))
		return task->pid;

	if (in_group) {
		struct task_struct *t;

		rcu_read_lock();
		if (!pid_alive(task))
			goto done;

		t = task;
		do {
			if (vm_is_stack_for_task(t, vma)) {
				ret = t->pid;
				goto done;
			}
		} while_each_thread(task, t);
done:
		rcu_read_unlock();
	}

	return ret;
}
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif
/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
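
/*
 * Pinning sketch (hypothetical helper): pin a single page of the
 * current task for reading. A return value of 1 means success; the
 * caller must drop the reference with put_page() when done.
 */
static __maybe_unused int example_pin_page(unsigned long uaddr,
					   struct page **page)
{
	return get_user_pages_fast(uaddr, 1, 0, page);
}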
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}
unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
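
/*
 * Mapping sketch (hypothetical helper): a kernel-initiated, read-only,
 * private mapping of @file at offset 0. vm_mmap() returns the mapped
 * address on success or a negative errno encoded in the return value.
 */
static __maybe_unused unsigned long example_map_readonly(struct file *file,
							 unsigned long size)
{
	return vm_mmap(file, 0, size, PROT_READ, MAP_PRIVATE, 0);
}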
struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		mapping = swap_address_space(entry);
	} else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}
int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}
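
/*
 * The two handlers above make vm.overcommit_ratio and
 * vm.overcommit_kbytes mutually exclusive: for example, writing
 * "echo 50 > /proc/sys/vm/overcommit_ratio" resets overcommit_kbytes
 * to 0, and writing overcommit_kbytes resets the ratio.
 */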
/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
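
/*
 * Worked example with hypothetical numbers: on a 4 KiB-page system
 * (PAGE_SHIFT == 12), sysctl_overcommit_kbytes == 1048576 yields
 * 1048576 >> (12 - 10) == 262144 pages, i.e. 1 GiB, and total swap
 * pages are then added on top.
 */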
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);