Avoid beyond bounds copy while caching ACL
[zen-stable.git] / lib / flex_array.c
blob 9b8b89458c4ce4a8995afd9448f000c6e73335a7
/*
 * Flexible array managed in PAGE_SIZE parts
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2009
 *
 * Author: Dave Hansen <dave@linux.vnet.ibm.com>
 */

#include <linux/flex_array.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/reciprocal_div.h>
struct flex_array_part {
        char elements[FLEX_ARRAY_PART_SIZE];
};

/*
 * If a user requests an allocation which is small
 * enough, we may simply use the space in the
 * flex_array->parts[] array to store the user
 * data.
 */
static inline int elements_fit_in_base(struct flex_array *fa)
{
        int data_size = fa->element_size * fa->total_nr_elements;
        if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT)
                return 1;
        return 0;
}
/**
 * flex_array_alloc - allocate a new flexible array
 * @element_size:	the size of individual elements in the array
 * @total:		total number of elements that this should hold
 * @flags:		page allocation flags to use for base array
 *
 * Note: all locking must be provided by the caller.
 *
 * @total is used to size internal structures.  If the user ever
 * accesses any array indexes >=@total, it will produce errors.
 *
 * The maximum number of elements is defined as: the number of
 * elements that can be stored in a page times the number of
 * page pointers that we can fit in the base structure or (using
 * integer math):
 *
 *	(PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
 *
 * Here's a table showing example capacities.  Note that the maximum
 * index that the get/put() functions can handle is just nr_objects-1.
 * This basically means that you get 4MB of storage on 32-bit and 2MB
 * on 64-bit.
 *
 *
 * Element size | Objects | Objects |
 * PAGE_SIZE=4k |  32-bit |  64-bit |
 * ---------------------------------|
 *      1 bytes | 4177920 | 2088960 |
 *      2 bytes | 2088960 | 1044480 |
 *      3 bytes | 1392300 |  696150 |
 *      4 bytes | 1044480 |  522240 |
 *     32 bytes |  130560 |   65408 |
 *     33 bytes |  126480 |   63240 |
 *   2048 bytes |    2040 |    1020 |
 *   2049 bytes |    1020 |     510 |
 *       void * | 1044480 |  261120 |
 *
 * Since 64-bit pointers are twice the size, we lose half the
 * capacity in the base structure.  Also note that no effort is made
 * to efficiently pack objects across page boundaries.
 */
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
                                        gfp_t flags)
{
        struct flex_array *ret;
        int elems_per_part = 0;
        int reciprocal_elems = 0;
        int max_size = 0;

        if (element_size) {
                elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
                reciprocal_elems = reciprocal_value(elems_per_part);
                max_size = FLEX_ARRAY_NR_BASE_PTRS * elems_per_part;
        }

        /* max_size will end up 0 if element_size > PAGE_SIZE */
        if (total > max_size)
                return NULL;
        ret = kzalloc(sizeof(struct flex_array), flags);
        if (!ret)
                return NULL;
        ret->element_size = element_size;
        ret->total_nr_elements = total;
        ret->elems_per_part = elems_per_part;
        ret->reciprocal_elems = reciprocal_elems;
        if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
                memset(&ret->parts[0], FLEX_ARRAY_FREE,
                                                FLEX_ARRAY_BASE_BYTES_LEFT);
        return ret;
}
EXPORT_SYMBOL(flex_array_alloc);
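
/*
 * Illustrative usage sketch, not part of the original file: allocating and
 * later freeing a flex array for a caller-defined element type.  The names
 * 'struct foo' and 'nr_foos' are hypothetical and used only for this example:
 *
 *	struct flex_array *fa;
 *
 *	fa = flex_array_alloc(sizeof(struct foo), nr_foos, GFP_KERNEL);
 *	if (!fa)
 *		return -ENOMEM;
 *	...
 *	flex_array_free(fa);
 *
 * flex_array_free() (below) releases both the second-level parts and the
 * base structure allocated here.
 */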
static int fa_element_to_part_nr(struct flex_array *fa,
                                        unsigned int element_nr)
{
        return reciprocal_divide(element_nr, fa->reciprocal_elems);
}
/**
 * flex_array_free_parts - just free the second-level pages
 * @fa:		the flex array from which to free parts
 *
 * This is to be used in cases where the base 'struct flex_array'
 * has been statically allocated and should not be freed.
 */
void flex_array_free_parts(struct flex_array *fa)
{
        int part_nr;

        if (elements_fit_in_base(fa))
                return;
        for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
                kfree(fa->parts[part_nr]);
}
EXPORT_SYMBOL(flex_array_free_parts);
void flex_array_free(struct flex_array *fa)
{
        flex_array_free_parts(fa);
        kfree(fa);
}
EXPORT_SYMBOL(flex_array_free);
static unsigned int index_inside_part(struct flex_array *fa,
                                        unsigned int element_nr,
                                        unsigned int part_nr)
{
        unsigned int part_offset;

        part_offset = element_nr - part_nr * fa->elems_per_part;
        return part_offset * fa->element_size;
}
static struct flex_array_part *
__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
{
        struct flex_array_part *part = fa->parts[part_nr];
        if (!part) {
                part = kmalloc(sizeof(struct flex_array_part), flags);
                if (!part)
                        return NULL;
                if (!(flags & __GFP_ZERO))
                        memset(part, FLEX_ARRAY_FREE,
                                sizeof(struct flex_array_part));
                fa->parts[part_nr] = part;
        }
        return part;
}
/**
 * flex_array_put - copy data into the array at @element_nr
 * @fa:		the flex array to copy data into
 * @element_nr:	index of the position in which to insert
 *		the new element.
 * @src:	address of data to copy into the array
 * @flags:	page allocation flags to use for array expansion
 *
 * Note that this *copies* the contents of @src into
 * the array.  If you are trying to store an array of
 * pointers, make sure to pass in &ptr instead of ptr.
 * You may instead wish to use the flex_array_put_ptr()
 * helper function.
 *
 * Locking must be provided by the caller.
 */
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
                        gfp_t flags)
{
        int part_nr = 0;
        struct flex_array_part *part;
        void *dst;

        if (element_nr >= fa->total_nr_elements)
                return -ENOSPC;
        if (!fa->element_size)
                return 0;
        if (elements_fit_in_base(fa))
                part = (struct flex_array_part *)&fa->parts[0];
        else {
                part_nr = fa_element_to_part_nr(fa, element_nr);
                part = __fa_get_part(fa, part_nr, flags);
                if (!part)
                        return -ENOMEM;
        }
        dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
        memcpy(dst, src, fa->element_size);
        return 0;
}
EXPORT_SYMBOL(flex_array_put);
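
/*
 * Illustrative sketch of the copy-in semantics described above, not part of
 * the original file.  'fa', 'foo', 'bar' and 'i' are hypothetical names:
 *
 *	struct foo foo = { .bar = 1 };
 *	int err;
 *
 *	err = flex_array_put(fa, i, &foo, GFP_KERNEL);
 *	if (err)
 *		return err;
 *
 * The element is copied out of 'foo'; -ENOSPC means 'i' was out of range and
 * -ENOMEM means a part could not be allocated.  To store a pointer itself,
 * pass &ptr or use the flex_array_put_ptr() helper mentioned above.
 */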
/**
 * flex_array_clear - clear element in array at @element_nr
 * @fa:		the flex array of the element.
 * @element_nr:	index of the position to clear.
 *
 * Locking must be provided by the caller.
 */
int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
{
        int part_nr = 0;
        struct flex_array_part *part;
        void *dst;

        if (element_nr >= fa->total_nr_elements)
                return -ENOSPC;
        if (!fa->element_size)
                return 0;
        if (elements_fit_in_base(fa))
                part = (struct flex_array_part *)&fa->parts[0];
        else {
                part_nr = fa_element_to_part_nr(fa, element_nr);
                part = fa->parts[part_nr];
                if (!part)
                        return -EINVAL;
        }
        dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
        memset(dst, FLEX_ARRAY_FREE, fa->element_size);
        return 0;
}
EXPORT_SYMBOL(flex_array_clear);
/**
 * flex_array_prealloc - guarantee that array space exists
 * @fa:			the flex array for which to preallocate parts
 * @start:		index of first array element for which space is allocated
 * @nr_elements:	number of elements for which space is allocated
 * @flags:		page allocation flags
 *
 * This will guarantee that no future calls to flex_array_put()
 * will allocate memory.  It can be used if you are expecting to
 * be holding a lock or in some atomic context while writing
 * data into the array.
 *
 * Locking must be provided by the caller.
 */
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
                        unsigned int nr_elements, gfp_t flags)
{
        int start_part;
        int end_part;
        int part_nr;
        unsigned int end;
        struct flex_array_part *part;

        if (!start && !nr_elements)
                return 0;
        if (start >= fa->total_nr_elements)
                return -ENOSPC;
        if (!nr_elements)
                return 0;

        end = start + nr_elements - 1;

        if (end >= fa->total_nr_elements)
                return -ENOSPC;
        if (!fa->element_size)
                return 0;
        if (elements_fit_in_base(fa))
                return 0;
        start_part = fa_element_to_part_nr(fa, start);
        end_part = fa_element_to_part_nr(fa, end);
        for (part_nr = start_part; part_nr <= end_part; part_nr++) {
                part = __fa_get_part(fa, part_nr, flags);
                if (!part)
                        return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(flex_array_prealloc);
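
/*
 * Illustrative sketch, not part of the original file: preallocating parts
 * with GFP_KERNEL so that later flex_array_put() calls made under a spinlock
 * never need to allocate.  'fa', 'lock', 'first', 'nr', 'i' and 'vals' are
 * hypothetical names:
 *
 *	if (flex_array_prealloc(fa, first, nr, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	spin_lock(&lock);
 *	for (i = 0; i < nr; i++)
 *		flex_array_put(fa, first + i, &vals[i], GFP_ATOMIC);
 *	spin_unlock(&lock);
 *
 * After the prealloc, each put only copies into an already-allocated part,
 * so the GFP_ATOMIC flag is never actually used for allocation.
 */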
/**
 * flex_array_get - pull data back out of the array
 * @fa:		the flex array from which to extract data
 * @element_nr:	index of the element to fetch from the array
 *
 * Returns a pointer to the data at index @element_nr.  Note
 * that this is a copy of the data that was passed in.  If you
 * are using this to store pointers, you'll get back &ptr.  You
 * may instead wish to use the flex_array_get_ptr helper.
 *
 * Locking must be provided by the caller.
 */
void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
{
        int part_nr = 0;
        struct flex_array_part *part;

        if (!fa->element_size)
                return NULL;
        if (element_nr >= fa->total_nr_elements)
                return NULL;
        if (elements_fit_in_base(fa))
                part = (struct flex_array_part *)&fa->parts[0];
        else {
                part_nr = fa_element_to_part_nr(fa, element_nr);
                part = fa->parts[part_nr];
                if (!part)
                        return NULL;
        }
        return &part->elements[index_inside_part(fa, element_nr, part_nr)];
}
EXPORT_SYMBOL(flex_array_get);
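
/*
 * Illustrative sketch, not part of the original file: reading an element
 * back.  The returned pointer refers to the array's own copy of the data,
 * so the caller typically copies it out or reads it in place under its own
 * locking.  'fa', 'foo', 'out' and 'i' are hypothetical names:
 *
 *	struct foo *p;
 *
 *	p = flex_array_get(fa, i);
 *	if (!p)
 *		return -ENOENT;
 *	memcpy(&out, p, sizeof(out));
 *
 * NULL is returned when 'i' is out of range or the element's part was never
 * allocated.
 */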
/**
 * flex_array_get_ptr - pull a ptr back out of the array
 * @fa:		the flex array from which to extract data
 * @element_nr:	index of the element to fetch from the array
 *
 * Returns the pointer placed in the flex array at element_nr using
 * flex_array_put_ptr().  This function should not be called if the
 * element in question was not set using the _put_ptr() helper.
 */
void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr)
{
        void **tmp;

        tmp = flex_array_get(fa, element_nr);
        if (!tmp)
                return NULL;

        return *tmp;
}
EXPORT_SYMBOL(flex_array_get_ptr);
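
/*
 * Illustrative sketch, not part of the original file: pairing the _ptr
 * helpers for an array created with element_size == sizeof(void *).  The
 * flex_array_put_ptr() helper mentioned above stores the pointer value
 * itself; flex_array_get_ptr() returns it.  'fa', 'obj' and 'i' are
 * hypothetical names:
 *
 *	struct foo *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	int err;
 *
 *	err = flex_array_put_ptr(fa, i, obj, GFP_KERNEL);
 *	...
 *	obj = flex_array_get_ptr(fa, i);
 */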
static int part_is_free(struct flex_array_part *part)
{
        int i;

        for (i = 0; i < sizeof(struct flex_array_part); i++)
                if (part->elements[i] != FLEX_ARRAY_FREE)
                        return 0;
        return 1;
}
/**
 * flex_array_shrink - free unused second-level pages
 * @fa:		the flex array to shrink
 *
 * Frees all second-level pages that consist solely of unused
 * elements.  Returns the number of pages freed.
 *
 * Locking must be provided by the caller.
 */
int flex_array_shrink(struct flex_array *fa)
{
        struct flex_array_part *part;
        int part_nr;
        int ret = 0;

        if (!fa->total_nr_elements || !fa->element_size)
                return 0;
        if (elements_fit_in_base(fa))
                return ret;
        for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
                part = fa->parts[part_nr];
                if (!part)
                        continue;
                if (part_is_free(part)) {
                        fa->parts[part_nr] = NULL;
                        kfree(part);
                        ret++;
                }
        }
        return ret;
}
EXPORT_SYMBOL(flex_array_shrink);
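
/*
 * Illustrative sketch, not part of the original file: combining
 * flex_array_clear() and flex_array_shrink().  Clearing poisons elements
 * with FLEX_ARRAY_FREE bytes; once every element of a part carries that
 * value, the part can be released by a shrink.  'fa', 'first', 'nr', 'i'
 * and 'freed' are hypothetical names:
 *
 *	for (i = first; i < first + nr; i++)
 *		flex_array_clear(fa, i);
 *	freed = flex_array_shrink(fa);
 *
 * 'freed' is the number of second-level pages released.
 */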