/*
 * rmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
/*
 * This memory manager provides general heap management and arbitrary
 * alignment for any number of memory segments.
 *
 * Notes:
 *
 * Memory blocks are allocated from the start of the first free memory
 * block large enough to satisfy the request. Alignment requirements
 * are satisfied by "sliding" the block forward until its base satisfies
 * the alignment specification; if this is not possible then the next
 * free block large enough to hold the request is tried.
 *
 * Since alignment can cause the creation of a new free block - the
 * unused memory formed between the start of the original free block
 * and the start of the allocated block - the memory manager must free
 * this memory to prevent a memory leak.
 *
 * Overlay memory is managed by reserving it through rmm_alloc, and freeing
 * it through rmm_free. The memory manager prevents DSP code/data that is
 * overlayed from being overwritten as long as the memory it runs at has
 * been allocated, and not yet freed.
 */
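/*
 * Worked example of the alignment "slide" described above (illustrative
 * numbers only, not part of the driver): a request of size 0x100 with
 * align 0x80, tried against a free block {addr = 0x10A0, size = 0x200},
 * is placed at 0x1100, and the 0x60-MAU hole at 0x10A0..0x10FF is handed
 * back to the free list:
 *
 *	tmpalign     = 0x10A0 % 0x80;		-> 0x20
 *	tmpalign     = 0x80 - 0x20;		-> 0x60, the slide distance
 *	allocsize    = 0x100 + 0x60;		-> 0x160 <= 0x200, so it fits
 *	*dsp_address = 0x10A0 + 0x60;		-> 0x1100, aligned to 0x80
 */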
#include <linux/types.h>
#include <linux/list.h>

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- This */
#include <dspbridge/rmm.h>
/*
 * ======== rmm_header ========
 * This header is used to maintain a list of free memory blocks.
 */
struct rmm_header {
	struct rmm_header *next;	/* form a free memory link list */
	u32 size;			/* size of the free memory */
	u32 addr;			/* DSP address of memory block */
};
/*
 * ======== rmm_ovly_sect ========
 * Keeps track of memory occupied by overlay section.
 */
struct rmm_ovly_sect {
	struct list_head list_elem;
	u32 addr;			/* Start of memory section */
	u32 size;			/* Length (target MAUs) of section */
	s32 page;			/* Memory page */
};
/*
 * ======== rmm_target_obj ========
 */
struct rmm_target_obj {
	struct rmm_segment *seg_tab;
	struct rmm_header **free_list;
	u32 num_segs;
	struct list_head ovly_list;	/* List of overlay memory in use */
};
static u32 refs;		/* module reference count */

static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
			u32 align, u32 *dsp_address);
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
		       u32 size);
/*
 * ======== rmm_alloc ========
 */
int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
	      u32 align, u32 *dsp_address, bool reserve)
{
	struct rmm_ovly_sect *sect, *prev_sect = NULL;
	struct rmm_ovly_sect *new_sect;
	u32 addr;
	int status = 0;

	DBC_REQUIRE(target);
	DBC_REQUIRE(dsp_address != NULL);
	DBC_REQUIRE(size > 0);
	DBC_REQUIRE(reserve || (target->num_segs > 0));
	DBC_REQUIRE(refs > 0);
	if (!reserve) {
		if (!alloc_block(target, segid, size, align, dsp_address)) {
			status = -ENOMEM;
		} else {
			/* Increment the number of allocated blocks in this
			 * segment */
			target->seg_tab[segid].number++;
		}
		goto func_end;
	}
	/* An overlay section - see if the block is already in use. If not,
	 * insert it into the list in ascending address order. */
	addr = *dsp_address;
	/* Find the place to insert the new list element. The list is sorted
	 * from smallest to largest address. */
	list_for_each_entry(sect, &target->ovly_list, list_elem) {
		if (addr <= sect->addr) {
			/* Check for overlap with sect */
			if ((addr + size > sect->addr) || (prev_sect &&
							   (prev_sect->addr +
							    prev_sect->size >
							    addr))) {
				status = -ENXIO;
			}
			break;
		}
		prev_sect = sect;
	}
	if (!status) {
		/* No overlap - allocate a list element for the new section. */
		new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
		if (new_sect == NULL) {
			status = -ENOMEM;
		} else {
			new_sect->addr = addr;
			new_sect->size = size;
			new_sect->page = segid;
			if (list_is_last(&sect->list_elem, &target->ovly_list))
				/* Put new section at the end of the list */
				list_add_tail(&new_sect->list_elem,
					      &target->ovly_list);
			else
				/* Put new section just before sect */
				list_add_tail(&new_sect->list_elem,
					      &sect->list_elem);
		}
	}
func_end:
	return status;
}
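/*
 * Usage sketch for rmm_alloc() (hypothetical caller, not part of this
 * file): allocate a 0x400-MAU block from segment 0 with 0x100 alignment,
 * then reserve an overlay section at a fixed DSP address. The target
 * object and the overlay address 0x80000000 are assumptions made for
 * the example.
 *
 *	u32 dsp_addr;
 *	u32 ovly_addr = 0x80000000;
 *	int status;
 *
 *	status = rmm_alloc(target, 0, 0x400, 0x100, &dsp_addr, false);
 *	if (!status)
 *		status = rmm_alloc(target, 0, 0x200, 0, &ovly_addr, true);
 */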
/*
 * ======== rmm_create ========
 */
int rmm_create(struct rmm_target_obj **target_obj,
	       struct rmm_segment seg_tab[], u32 num_segs)
{
	struct rmm_header *hptr;
	struct rmm_segment *sptr, *tmp;
	struct rmm_target_obj *target;
	s32 i;
	int status = 0;

	DBC_REQUIRE(target_obj != NULL);
	DBC_REQUIRE(num_segs == 0 || seg_tab != NULL);

	/* Allocate DBL target object */
	target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);

	if (target == NULL)
		status = -ENOMEM;

	if (status)
		goto func_cont;

	/* Initialize the overlay list up front so that rmm_delete() can
	 * safely walk it even if a later allocation fails. */
	INIT_LIST_HEAD(&target->ovly_list);

	target->num_segs = num_segs;
	if (!(num_segs > 0))
		goto func_cont;
	/* Allocate the memory for the free list from the host's memory */
	target->free_list = kzalloc(num_segs * sizeof(struct rmm_header *),
				    GFP_KERNEL);
	if (target->free_list == NULL) {
		status = -ENOMEM;
		goto func_cont;
	}
	/* Allocate headers for each element on the free list */
	for (i = 0; i < (s32) num_segs; i++) {
		target->free_list[i] =
		    kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
		if (target->free_list[i] == NULL) {
			status = -ENOMEM;
			/* Bail out: the init loop below would dereference
			 * the NULL free-list entry. */
			goto func_cont;
		}
	}
	/* Allocate memory for the initial segment table */
	target->seg_tab = kzalloc(num_segs * sizeof(struct rmm_segment),
				  GFP_KERNEL);
	if (target->seg_tab == NULL) {
		status = -ENOMEM;
	} else {
		/* Initialize the segment table and free list */
		sptr = target->seg_tab;
		for (i = 0, tmp = seg_tab; num_segs > 0; num_segs--, i++) {
			*sptr = *tmp;
			hptr = target->free_list[i];
			hptr->addr = tmp->base;
			hptr->size = tmp->length;
			hptr->next = NULL;
			tmp++;
			sptr++;
		}
	}
func_cont:
	if (!status) {
		*target_obj = target;
	} else {
		*target_obj = NULL;
		if (target)
			rmm_delete(target);
	}

	DBC_ENSURE((!status && *target_obj)
		   || (status && *target_obj == NULL));

	return status;
}
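/*
 * Creation sketch (illustrative values, not part of the driver): build a
 * one-segment table covering DSP words 0x8000..0xFFFF and create a target
 * for it. Field names follow struct rmm_segment in <dspbridge/rmm.h>; the
 * base, length and space values are made up for the example.
 *
 *	struct rmm_segment seg = {
 *		.base = 0x8000,		// first word of the segment
 *		.length = 0x8000,	// segment size in target MAUs
 *		.space = 0,		// code/data space indicator
 *		.number = 0,		// no blocks allocated yet
 *	};
 *	struct rmm_target_obj *target;
 *	int status = rmm_create(&target, &seg, 1);
 */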
/*
 * ======== rmm_delete ========
 */
void rmm_delete(struct rmm_target_obj *target)
{
	struct rmm_ovly_sect *sect, *tmp;
	struct rmm_header *hptr;
	struct rmm_header *next;
	u32 i;

	DBC_REQUIRE(target);

	kfree(target->seg_tab);

	list_for_each_entry_safe(sect, tmp, &target->ovly_list, list_elem) {
		list_del(&sect->list_elem);
		kfree(sect);
	}

	if (target->free_list != NULL) {
		/* Free the elements on the free list */
		for (i = 0; i < target->num_segs; i++) {
			hptr = next = target->free_list[i];
			while (next) {
				hptr = next;
				next = hptr->next;
				kfree(hptr);
			}
		}
		kfree(target->free_list);
	}

	kfree(target);
}
/*
 * ======== rmm_exit ========
 */
void rmm_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;

	DBC_ENSURE(refs >= 0);
}
/*
 * ======== rmm_free ========
 */
bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
	      bool reserved)
{
	struct rmm_ovly_sect *sect, *tmp;
	bool ret = false;

	DBC_REQUIRE(target);

	DBC_REQUIRE(reserved || segid < target->num_segs);
	DBC_REQUIRE(reserved || (dsp_addr >= target->seg_tab[segid].base &&
				 (dsp_addr + size) <=
				 (target->seg_tab[segid].base +
				  target->seg_tab[segid].length)));

	/*
	 * Free or unreserve memory.
	 */
	if (!reserved) {
		ret = free_block(target, segid, dsp_addr, size);
		if (ret)
			target->seg_tab[segid].number--;
	} else {
		/* Unreserve memory */
		list_for_each_entry_safe(sect, tmp, &target->ovly_list,
					 list_elem) {
			if (dsp_addr == sect->addr) {
				DBC_ASSERT(size == sect->size);
				/* Remove the section from the list */
				list_del(&sect->list_elem);
				kfree(sect);
				return true;
			}
		}
	}
	return ret;
}
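/*
 * Teardown sketch (hypothetical caller, mirroring the rmm_alloc() sketch
 * above): release a heap block by segment, address and size, then
 * unreserve an overlay section at exactly the address and size it was
 * reserved with.
 *
 *	rmm_free(target, 0, dsp_addr, 0x400, false);	// heap block
 *	rmm_free(target, 0, 0x80000000, 0x200, true);	// overlay section
 */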
/*
 * ======== rmm_init ========
 */
bool rmm_init(void)
{
	DBC_REQUIRE(refs >= 0);

	refs++;

	return true;
}
/*
 * ======== rmm_stat ========
 */
bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
	      struct dsp_memstat *mem_stat_buf)
{
	struct rmm_header *head;
	bool ret = false;
	u32 max_free_size = 0;
	u32 total_free_size = 0;
	u32 free_blocks = 0;

	DBC_REQUIRE(mem_stat_buf != NULL);
	DBC_ASSERT(target != NULL);

	if ((u32) segid < target->num_segs) {
		head = target->free_list[segid];

		/* Collect data from the free list */
		while (head != NULL) {
			max_free_size = max(max_free_size, head->size);
			total_free_size += head->size;
			free_blocks++;
			head = head->next;
		}

		mem_stat_buf->size = target->seg_tab[segid].length;
		mem_stat_buf->num_free_blocks = free_blocks;
		mem_stat_buf->total_free_size = total_free_size;
		mem_stat_buf->len_max_free_block = max_free_size;
		mem_stat_buf->num_alloc_blocks =
		    target->seg_tab[segid].number;

		ret = true;
	}
	return ret;
}
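/*
 * Stats sketch (hypothetical caller): query segment 0 and report its
 * fragmentation. The struct dsp_memstat fields are the ones filled in
 * above; pr_info() is used purely for illustration.
 *
 *	struct dsp_memstat stat;
 *
 *	if (rmm_stat(target, 0, &stat))
 *		pr_info("seg0: %u free MAUs in %u blocks (largest %u)\n",
 *			stat.total_free_size, stat.num_free_blocks,
 *			stat.len_max_free_block);
 */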
/*
 * ======== alloc_block ========
 * This allocation function allocates memory from the lowest addresses
 * first.
 */
static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
			u32 align, u32 *dsp_address)
{
	struct rmm_header *head;
	struct rmm_header *prevhead = NULL;
	struct rmm_header *next;
	u32 tmpalign;
	u32 alignbytes;
	u32 hsize;
	u32 allocsize;
	u32 addr;

	alignbytes = (align == 0) ? 1 : align;
	head = target->free_list[segid];

	do {
		hsize = head->size;
		next = head->next;

		addr = head->addr;	/* alloc from the bottom */

		/* align the allocation */
		tmpalign = (u32) addr % alignbytes;
		if (tmpalign != 0)
			tmpalign = alignbytes - tmpalign;

		allocsize = size + tmpalign;

		if (hsize >= allocsize) {	/* big enough */
			if (hsize == allocsize && prevhead != NULL) {
				prevhead->next = next;
				kfree(head);
			} else {
				head->size = hsize - allocsize;
				head->addr += allocsize;
			}

			/* free up any hole created by alignment */
			if (tmpalign)
				free_block(target, segid, addr, tmpalign);

			*dsp_address = addr + tmpalign;
			return true;
		}

		prevhead = head;
		head = next;

	} while (head != NULL);

	return false;
}
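/*
 * Free-list effect of a successful alloc_block() call (illustrative
 * numbers): serving a request of size 0x200, align 0 from a free block
 * {addr = 0x2000, size = 0x500} carves the block from the bottom:
 *
 *	before:	free_list[segid] -> {0x2000, 0x500}
 *	after:	free_list[segid] -> {0x2200, 0x300}, *dsp_address = 0x2000
 *
 * With a nonzero align, the hole below the aligned base is handed back
 * through free_block(), as in the worked example near the top of the file.
 */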
/*
 * ======== free_block ========
 * TO DO: free_block() allocates memory, which could result in failure.
 * Could allocate an rmm_header in rmm_alloc(), to be kept in a pool.
 * free_block() could use an rmm_header from the pool, freeing as blocks
 * are coalesced.
 */
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
		       u32 size)
{
	struct rmm_header *head;
	struct rmm_header *thead;
	struct rmm_header *rhead;
	bool ret = true;

	/* Create a memory header to hold the newly freed block. */
	rhead = kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
	if (rhead == NULL) {
		ret = false;
	} else {
		/* search down the free list to find the right place for addr */
		head = target->free_list[segid];

		if (addr >= head->addr) {
			while (head->next != NULL && addr > head->next->addr)
				head = head->next;

			thead = head->next;

			head->next = rhead;
			rhead->next = thead;
			rhead->addr = addr;
			rhead->size = size;
		} else {
			/* addr lies below the first block: swap the new
			 * block's contents into the list head and push the
			 * old head contents into rhead. */
			*rhead = *head;
			head->next = rhead;
			head->addr = addr;
			head->size = size;
			thead = rhead->next;
		}

		/* join with the upper block, if possible */
		if (thead != NULL && (rhead->addr + rhead->size) ==
		    thead->addr) {
			head->next = rhead->next;
			thead->size = size + thead->size;
			thead->addr = addr;
			kfree(rhead);
			rhead = thead;
		}

		/* join with the lower block, if possible */
		if ((head->addr + head->size) == rhead->addr) {
			head->next = rhead->next;
			head->size = head->size + rhead->size;
			kfree(rhead);
		}
	}

	return ret;
}
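/*
 * Coalescing sketch for free_block() (illustrative numbers): freeing
 * {addr = 0x2200, size = 0x100} between free blocks {0x2000, 0x200} and
 * {0x2300, 0x80} collapses all three into a single node:
 *
 *	before:	{0x2000, 0x200} -> {0x2300, 0x80}
 *	insert:	{0x2000, 0x200} -> {0x2200, 0x100} -> {0x2300, 0x80}
 *	upper:	{0x2000, 0x200} -> {0x2200, 0x180}	(0x2200 + 0x100 == 0x2300)
 *	lower:	{0x2000, 0x380}				(0x2000 + 0x200 == 0x2200)
 */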