/* mm/util.c */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <asm/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>
/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
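/*
 * Usage sketch (illustrative only, not part of util.c): a driver keeping a
 * private copy of a caller-supplied label.  "struct foo_dev" and its "label"
 * field are hypothetical names used only for this example.
 *
 *	static int foo_set_label(struct foo_dev *dev, const char *label)
 *	{
 *		char *copy = kstrdup(label, GFP_KERNEL);
 *
 *		if (!copy)
 *			return -ENOMEM;
 *		kfree(dev->label);
 *		dev->label = copy;
 *		return 0;
 *	}
 */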
/**
 * kstrndup - allocate space for and copy an existing string, limited to @max
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
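/*
 * Usage sketch (illustrative, assumed names): bounding the copy with @max is
 * useful when the source is a fixed-size field that may not be NUL
 * terminated, e.g. a name read from hardware or a packed on-disk record.
 *
 *	char *name = kstrndup(raw->name, sizeof(raw->name), GFP_KERNEL);
 *
 *	if (!name)
 *		return -ENOMEM;
 *
 * At most sizeof(raw->name) bytes are read and the duplicate is always NUL
 * terminated, even when the source field is not.
 */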
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
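/*
 * Usage sketch (illustrative, assumed names): duplicating a binary blob, such
 * as a template structure, before modifying the copy.
 *
 *	struct foo_config *copy;
 *
 *	copy = kmemdup(defaults, sizeof(*defaults), GFP_KERNEL);
 *	if (!copy)
 *		return -ENOMEM;
 *	copy->flags |= FOO_ENABLED;
 *
 * "defaults", "struct foo_config" and FOO_ENABLED are made-up names; the
 * point is that kmemdup() copies exactly @len bytes and does not assume the
 * source is a string.
 */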
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
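/*
 * Usage sketch (illustrative, assumed names): a typical ioctl-style handler
 * that pulls a user-supplied buffer into the kernel in one step instead of
 * open-coding kmalloc() + copy_from_user().
 *
 *	static long foo_ioctl_write(struct foo_dev *dev,
 *				    const void __user *ubuf, size_t len)
 *	{
 *		void *buf = memdup_user(ubuf, len);
 *
 *		if (IS_ERR(buf))
 *			return PTR_ERR(buf);
 *		foo_process(dev, buf, len);
 *		kfree(buf);
 *		return 0;
 *	}
 *
 * Note that failure is reported via ERR_PTR()/PTR_ERR(), not a NULL return.
 */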
/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}
EXPORT_SYMBOL(__krealloc);
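/*
 * Usage sketch (illustrative, assumed names): __krealloc() fits the RCU case
 * mentioned above, where readers may still dereference the old buffer and it
 * must not be freed until after a grace period.
 *
 *	new = __krealloc(old, new_size, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *	rcu_assign_pointer(dev->table, new);
 *	if (new != old) {
 *		synchronize_rcu();
 *		kfree(old);
 *	}
 *
 * The "new != old" check matters because __krealloc() may return the original
 * pointer when the existing allocation is already large enough.
 */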
/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
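/*
 * Usage sketch (illustrative, assumed names): growing a dynamically sized
 * array.  On allocation failure the original buffer is left untouched, so
 * assign the result to a temporary and only update the stored pointer on
 * success.
 *
 *	int *bigger;
 *
 *	bigger = krealloc(dev->ids, new_count * sizeof(*bigger), GFP_KERNEL);
 *	if (!bigger)
 *		return -ENOMEM;
 *	dev->ids = bigger;
 */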
/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
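/*
 * Usage sketch (illustrative, assumed names): kzfree() is intended for
 * buffers holding sensitive data, such as key material, so the contents do
 * not linger in freed memory.
 *
 *	u8 *key = kmalloc(keylen, GFP_KERNEL);
 *
 *	(use the key for the crypto operation)
 *
 *	kzfree(key);
 *
 * For ordinary buffers plain kfree() is preferable, since kzfree() touches
 * the whole underlying allocation as described above.
 */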
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
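/*
 * Usage sketch (illustrative, assumed names): copying a short, NUL-terminated
 * string argument from a system call or ioctl, with an upper bound on how
 * much of user space may be scanned.
 *
 *	char *name = strndup_user(uname, PAGE_SIZE);
 *
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	err = foo_lookup(name);
 *	kfree(name);
 *	return err;
 */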
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif
/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
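/*
 * Usage sketch (illustrative, assumed names): pinning a user buffer, e.g. in
 * preparation for direct I/O or a DMA transfer set up from process context.
 *
 *	int i, got;
 *
 *	got = get_user_pages_fast(addr, nr_pages, 1, pages);
 *	if (got <= 0)
 *		return got ? got : -EFAULT;
 *
 *	(perform the transfer against the pinned pages)
 *
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 *
 * The caller must cope with a short pin (got < nr_pages) and must drop each
 * page reference with put_page() once the I/O completes.
 */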
/* Tracepoint definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);