/* mm/util.c */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

#include "internal.h"

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
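
/*
 * Illustrative only, not part of the original file: a minimal sketch of how
 * callers typically use kstrdup()/kstrndup().  The identifier "name" and the
 * error-handling context are made up for this example; duplicates are freed
 * with kfree().
 *
 *	char *name = kstrdup(src, GFP_KERNEL);
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree(name);
 */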

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
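
/*
 * Illustrative sketch (not in the original source): duplicating a
 * caller-supplied template structure.  "struct foo" and "tmpl" are
 * hypothetical names used only for this example.
 *
 *	struct foo *copy = kmemdup(tmpl, sizeof(*tmpl), GFP_KERNEL);
 *	if (!copy)
 *		return -ENOMEM;
 */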

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
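
/*
 * Illustrative sketch (not part of the original file): memdup_user()
 * returns an ERR_PTR() rather than NULL on failure, so callers check the
 * result with IS_ERR()/PTR_ERR().  "ubuf" and "count" are hypothetical
 * names used only for this example.
 *
 *	void *buf = memdup_user(ubuf, count);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */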

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
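
/*
 * Illustrative sketch (not part of the original file): copying a
 * NUL-terminated string from user space with a length cap, as an
 * ioctl-style handler might.  "uarg" and the PATH_MAX cap are only example
 * choices.
 *
 *	char *path = strndup_user(uarg, PATH_MAX);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	kfree(path);
 */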

/*
 * Link @vma into @mm's VMA list after @prev, using @rb_parent to find the
 * following VMA when there is no @prev.
 */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If @in_group is true, check the entire thread group; otherwise check
 * only @task itself.  Returns the task_struct of the task that the vma is
 * a stack for, or NULL.  Must be called under rcu_read_lock().
 */
struct task_struct *task_of_stack(struct task_struct *task,
				struct vm_area_struct *vma, bool in_group)
{
	if (vm_is_stack_for_task(task, vma))
		return task;

	if (in_group) {
		struct task_struct *t;

		for_each_thread(task, t) {
			if (vm_is_stack_for_task(t, vma))
				return t;
		}
	}

	return NULL;
}
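
/*
 * Illustrative sketch (not part of the original file): per the comment
 * above, callers hold the RCU read lock around the lookup.
 *
 *	rcu_read_lock();
 *	task = task_of_stack(task, vma, in_group);
 *	rcu_read_unlock();
 */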

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it is IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, it simply returns
 * with no pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
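
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pins a few pages, uses them, then drops the references with put_page().
 * "uaddr" and "NR" are hypothetical names, and error handling is
 * abbreviated; the call may also pin fewer than NR pages.
 *
 *	struct page *pages[NR];
 *	int pinned = get_user_pages_fast(uaddr, NR, 1, pages);
 *	if (pinned < 0)
 *		return pinned;
 *	... access the pinned pages ...
 *	while (pinned--)
 *		put_page(pages[pinned]);
 */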

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
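
/*
 * Illustrative sketch (not part of the original file): kvfree() pairs with
 * the common "try kmalloc(), fall back to vmalloc()" allocation pattern,
 * since it dispatches on is_vmalloc_addr().  "n" is a hypothetical size.
 *
 *	void *buf = kmalloc(n, GFP_KERNEL | __GFP_NOWARN);
 *	if (!buf)
 *		buf = vmalloc(n);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvfree(buf);
 */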

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		mapping = swap_address_space(entry);
	} else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
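
/*
 * Worked example (not part of the original file), assuming 4 KiB pages
 * (PAGE_SHIFT == 12): sysctl_overcommit_kbytes is converted from KiB to
 * pages by the shift, e.g. 8192 kB >> 2 == 2048 pages.  With the ratio
 * path instead, 1,000,000 non-hugetlb RAM pages and a 50% ratio allow
 * 500,000 pages; total_swap_pages is then added on top.
 */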

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	len = mm->arg_end - mm->arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, mm->arg_start, buffer, len, 0);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = mm->env_end - mm->env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, mm->env_start,
						 buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
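
/*
 * Illustrative sketch (not part of the original file): reading a task's
 * command line into a local buffer, remembering that the result is not
 * guaranteed to be NUL-terminated.  "tsk" is a hypothetical task pointer.
 *
 *	char buf[256];
 *	int n = get_cmdline(tsk, buf, sizeof(buf) - 1);
 *	buf[n] = '\0';
 */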