 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address
 * space that can be directly mapped to any MPU buffer or memory region.
 *
 * Notes:
 *   Region: Generic memory entity having a start address and a size
 *   Chunk: Reserved region
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>

/*  ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/*  ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/*  ----------------------------------- This */
#include <dspbridge/dmm.h>
/*  ----------------------------------- Defines, Data Structures, Typedefs */
#define DMM_ADDR_VIRTUAL(a) \
	(((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
	 dyn_mem_map_beg)
#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
struct dmm_object {
	/* dmm_lock serializes access to the memory manager
	 * from multiple threads. */
	spinlock_t dmm_lock;	/* Lock to access dmm mgr */
};
/*  ----------------------------------- Globals */
static u32 refs;		/* module reference count */

/* One entry per 4 KB page of DSP virtual address space */
struct map_page {
	u32 region_size:15;
	u32 mapped_size:15;
	u32 reserved:1;
	u32 mapped:1;
};

/*  Create the free list */
static struct map_page *virtual_mapping_table;
static u32 free_region;		/* The index of free region */
static u32 free_size;		/* The size of the current free region */
static u32 dyn_mem_map_beg;	/* The beginning of dynamic memory mapping */
static u32 table_size;		/* The size of virt and phys pages tables */
/*  ----------------------------------- Function Prototypes */
static struct map_page *get_region(u32 addr);
static struct map_page *get_free_region(u32 len);
static struct map_page *get_mapped_region(u32 addrs);
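/*
 * Illustrative sketch (assumption, not part of the original driver): a
 * quick self-check of the round trip between a DSP virtual address and
 * its entry in virtual_mapping_table using the DMM_ADDR_* macros above.
 * Assumes dmm_create_tables() has already set a 4 KB aligned
 * dyn_mem_map_beg and allocated the table.
 */
static bool __maybe_unused dmm_addr_round_trip_ok(u32 addr)
{
	u32 i = DMM_ADDR_TO_INDEX(addr);

	/* Mapping the entry back must recover the page-aligned address */
	return DMM_ADDR_VIRTUAL(&virtual_mapping_table[i]) ==
	       (addr & ~(PG_SIZE4K - 1));
}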
/*  ======== dmm_create_tables ========
 *  Purpose:
 *      Create the table that holds information about the physical pages of
 *      the user buffer to be mapped, and the table that tracks the DSP
 *      virtual memory reserved for that buffer.
 */
int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	int status = 0;

	status = dmm_delete_tables(dmm_obj);
	if (!status) {
		dyn_mem_map_beg = addr;
		table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
		/*  Create the free list */
		virtual_mapping_table = __vmalloc(table_size *
				sizeof(struct map_page), GFP_KERNEL |
				__GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
		if (virtual_mapping_table == NULL) {
			status = -ENOMEM;
		} else {
			/* On successful allocation,
			 * all entries are zero ('free') */
			free_region = 0;
			free_size = table_size * PG_SIZE4K;
			virtual_mapping_table[0].region_size = table_size;
		}
	}

	if (status)
		pr_err("%s: failure, status 0x%x\n", __func__, status);

	return status;
}
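/*
 * Worked sizing example (illustrative, values assumed): for a 6 MB DMM
 * region, dmm_create_tables() allocates one struct map_page per 4 KB
 * page of DSP virtual address space.
 */
static void __maybe_unused dmm_example_sizing(void)
{
	u32 size = 0x600000;	/* 6 MB of DSP virtual address space */
	u32 entries = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;

	/* 0x600000 / 0x1000 = 1536 tracking entries */
	pr_info("%s: %u map_page entries\n", __func__, entries);
}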
/*
 *  ======== dmm_create ========
 *  Purpose:
 *      Create a dynamic memory manager object.
 */
int dmm_create(struct dmm_object **dmm_manager,
	       struct dev_object *hdev_obj,
	       const struct dmm_mgrattrs *mgr_attrts)
{
	struct dmm_object *dmm_obj = NULL;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dmm_manager != NULL);

	*dmm_manager = NULL;
	/* create, zero, and tag a dmm mgr object */
	dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
	if (dmm_obj != NULL) {
		spin_lock_init(&dmm_obj->dmm_lock);
		*dmm_manager = dmm_obj;
	} else {
		status = -ENOMEM;
	}

	return status;
}
/*
 *  ======== dmm_destroy ========
 *  Purpose:
 *      Release the dynamic memory manager resources.
 */
int dmm_destroy(struct dmm_object *dmm_mgr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	if (dmm_mgr) {
		status = dmm_delete_tables(dmm_obj);
		if (!status)
			kfree(dmm_obj);
	} else {
		status = -EFAULT;
	}

	return status;
}
/*
 *  ======== dmm_delete_tables ========
 *  Purpose:
 *      Delete the DMM tables.
 */
int dmm_delete_tables(struct dmm_object *dmm_mgr)
{
	int status = 0;

	DBC_REQUIRE(refs > 0);
	/* Delete all DMM tables */
	if (dmm_mgr)
		vfree(virtual_mapping_table);
	else
		status = -EFAULT;

	return status;
}
/*
 *  ======== dmm_exit ========
 *  Purpose:
 *      Discontinue usage of module; free resources when reference count
 *      reaches 0.
 */
void dmm_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;
}
/*
 *  ======== dmm_get_handle ========
 *  Purpose:
 *      Return the dynamic memory manager object for this device.
 *      This is typically called from the client process.
 */
int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
{
	int status = 0;
	struct dev_object *hdev_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dmm_manager != NULL);
	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (!status)
		status = dev_get_dmm_mgr(hdev_obj, dmm_manager);

	return status;
}
/*
 *  ======== dmm_init ========
 *  Purpose:
 *      Initializes private state of DMM module.
 */
bool dmm_init(void)
{
	bool ret = true;

	DBC_REQUIRE(refs >= 0);

	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	virtual_mapping_table = NULL;
	table_size = 0;

	return ret;
}
/*
 *  ======== dmm_map_memory ========
 *  Purpose:
 *      Add a mapping block to the reserved chunk. DMM assumes that this block
 *      will be mapped in the DSP/IVA's address space. DMM returns an error if
 *      a mapping overlaps another one. This function stores the info that
 *      will be required later while unmapping the block.
 */
int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);
	/* Find the reserved memory chunk containing the DSP block to
	 * be mapped */
	chunk = (struct map_page *)get_region(addr);
	if (chunk != NULL) {
		/* Mark the region 'mapped', leave the 'reserved' info as-is */
		chunk->mapped = true;
		chunk->mapped_size = (size / PG_SIZE4K);
	} else {
		status = -ENOENT;
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
		"chunk %p", __func__, dmm_mgr, addr, size, status, chunk);

	return status;
}
/*
 *  ======== dmm_reserve_memory ========
 *  Purpose:
 *      Reserve a chunk of virtually contiguous DSP/IVA address space.
 */
int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
		       u32 *prsv_addr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *node;
	int status = 0;
	u32 rsv_addr = 0;
	u32 rsv_size = 0;

	spin_lock(&dmm_obj->dmm_lock);

	/* Try to get a DSP chunk from the free list */
	node = get_free_region(size);
	if (node != NULL) {
		/*  DSP chunk of given size is available. */
		rsv_addr = DMM_ADDR_VIRTUAL(node);
		/* Calculate the number of entries to use */
		rsv_size = size / PG_SIZE4K;
		if (rsv_size < node->region_size) {
			/* Mark remainder of free region */
			node[rsv_size].mapped = false;
			node[rsv_size].reserved = false;
			node[rsv_size].region_size =
			    node->region_size - rsv_size;
			node[rsv_size].mapped_size = 0;
		}
		/* get_free_region() returns a first-fit chunk, but we only
		 * use what was requested. */
		node->mapped = false;
		node->reserved = true;
		node->region_size = rsv_size;
		node->mapped_size = 0;
		/* Return the chunk's starting address */
		*prsv_addr = rsv_addr;
	} else {
		/* DSP chunk of given size is not available */
		status = -ENOMEM;
	}

	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
		"rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
		prsv_addr, status, rsv_addr, rsv_size);

	return status;
}
/*
 *  ======== dmm_un_map_memory ========
 *  Purpose:
 *      Remove the mapped block from the reserved chunk.
 */
int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);
	chunk = get_mapped_region(addr);
	if (chunk == NULL)
		status = -ENOENT;

	if (!status) {
		/* Unmap the region */
		*psize = chunk->mapped_size * PG_SIZE4K;
		chunk->mapped = false;
		chunk->mapped_size = 0;
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
		"chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);

	return status;
}
/*
 *  ======== dmm_un_reserve_memory ========
 *  Purpose:
 *      Free a chunk of reserved DSP/IVA address space.
 */
int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	u32 i;
	u32 chunk_size;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);

	/* Find the chunk containing the reserved address */
	chunk = get_mapped_region(rsv_addr);
	if (chunk == NULL)
		status = -ENOENT;

	if (!status) {
		/* Free all the mapped pages for this reserved region */
		i = 0;
		while (i < chunk->region_size) {
			if (chunk[i].mapped) {
				/* Remove mapping from the page tables. */
				chunk_size = chunk[i].mapped_size;
				/* Clear the mapping flags */
				chunk[i].mapped = false;
				chunk[i].mapped_size = 0;
				i += chunk_size;
			} else {
				i++;
			}
		}
		/* Clear the flags (mark the region 'free') */
		chunk->reserved = false;
		/* NOTE: We do NOT coalesce free regions here.
		 * Free regions are coalesced in get_free_region(), as it
		 * traverses the whole mapping table.
		 */
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
		__func__, dmm_mgr, rsv_addr, status, chunk);

	return status;
}
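/*
 * Illustrative usage sketch (assumption, not part of the original driver):
 * the typical lifecycle of a DSP virtual address range as seen from a
 * client with a processor handle, pairing dmm_reserve_memory() and
 * dmm_map_memory() with dmm_un_map_memory() and dmm_un_reserve_memory().
 * Error handling is abbreviated and the actual MMU programming, which is
 * done elsewhere in the bridge, is omitted.
 */
static int __maybe_unused dmm_example_lifecycle(void *hprocessor, u32 size)
{
	struct dmm_object *dmm_mgr;
	u32 dsp_addr = 0;
	u32 unmapped = 0;
	int status;

	/* Look up the DMM manager for this processor's device */
	status = dmm_get_handle(hprocessor, &dmm_mgr);
	if (status)
		return status;

	/* Reserve a virtually contiguous DSP address range */
	status = dmm_reserve_memory(dmm_mgr, size, &dsp_addr);
	if (status)
		return status;

	/* Record a mapping covering the start of the reserved chunk */
	status = dmm_map_memory(dmm_mgr, dsp_addr, size);
	if (!status)
		status = dmm_un_map_memory(dmm_mgr, dsp_addr, &unmapped);

	/* Release the reservation regardless of the mapping outcome */
	dmm_un_reserve_memory(dmm_mgr, dsp_addr);
	return status;
}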
/*
 *  ======== get_region ========
 *  Purpose:
 *      Returns the region containing the specified address
 */
static struct map_page *get_region(u32 addr)
{
	struct map_page *curr_region = NULL;
	u32 i;

	if (virtual_mapping_table != NULL) {
		/* find page mapped by this address */
		i = DMM_ADDR_TO_INDEX(addr);
		if (i < table_size)
			curr_region = virtual_mapping_table + i;
	}

	dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
		__func__, curr_region, free_region, free_size);
	return curr_region;
}
/*
 *  ======== get_free_region ========
 *  Purpose:
 *      Returns the requested free region
 */
static struct map_page *get_free_region(u32 len)
{
	struct map_page *curr_region = NULL;
	u32 i = 0;
	u32 region_size = 0;
	u32 next_i = 0;

	if (virtual_mapping_table == NULL)
		return curr_region;
	if (len > free_size) {
		/* Find the largest free region
		 * (coalesce during the traversal) */
		while (i < table_size) {
			region_size = virtual_mapping_table[i].region_size;
			next_i = i + region_size;
			if (virtual_mapping_table[i].reserved == false) {
				/* Coalesce, if possible */
				if (next_i < table_size &&
				    virtual_mapping_table[next_i].reserved
				    == false) {
					virtual_mapping_table[i].region_size +=
					    virtual_mapping_table
					    [next_i].region_size;
					continue;
				}
				region_size *= PG_SIZE4K;
				if (region_size > free_size) {
					free_region = i;
					free_size = region_size;
				}
			}
			i = next_i;
		}
	}
	if (len <= free_size) {
		curr_region = virtual_mapping_table + free_region;
		free_region += (len / PG_SIZE4K);
		free_size -= len;
	}
	return curr_region;
}
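/*
 * Illustrative sketch (assumption, not part of the original driver): walk
 * the region chain the way the allocator above does, hopping from one
 * region head to the next via region_size, and count how many 4 KB pages
 * are currently unreserved. Intended only as a mental model of how the
 * mapping table is traversed.
 */
static u32 __maybe_unused dmm_count_free_pages(void)
{
	u32 i = 0;
	u32 pages = 0;

	if (virtual_mapping_table == NULL)
		return 0;

	while (i < table_size) {
		/* Defensive: a zero-sized head would never advance */
		if (virtual_mapping_table[i].region_size == 0)
			break;
		if (!virtual_mapping_table[i].reserved)
			pages += virtual_mapping_table[i].region_size;
		i += virtual_mapping_table[i].region_size;
	}
	return pages;
}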
/*
 *  ======== get_mapped_region ========
 *  Purpose:
 *      Returns the requested mapped region
 */
static struct map_page *get_mapped_region(u32 addrs)
{
	u32 i = 0;
	struct map_page *curr_region = NULL;

	if (virtual_mapping_table == NULL)
		return curr_region;

	i = DMM_ADDR_TO_INDEX(addrs);
	if (i < table_size && (virtual_mapping_table[i].mapped ||
			       virtual_mapping_table[i].reserved))
		curr_region = virtual_mapping_table + i;

	return curr_region;
}
u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
{
	struct map_page *curr_node = NULL;
	u32 i;
	u32 freemem = 0;
	u32 bigsize = 0;

	spin_lock(&dmm_mgr->dmm_lock);

	if (virtual_mapping_table != NULL) {
		for (i = 0; i < table_size; i +=
		     virtual_mapping_table[i].region_size) {
			curr_node = virtual_mapping_table + i;
			if (curr_node->reserved) {
				/*printk("RESERVED size = 0x%x, "
				   "Map size = 0x%x\n",
				   (curr_node->region_size * PG_SIZE4K),
				   (curr_node->mapped == false) ? 0 :
				   (curr_node->mapped_size * PG_SIZE4K));
				 */
			} else {
				/* printk("UNRESERVED size = 0x%x\n",
				   (curr_node->region_size * PG_SIZE4K));
				 */
				freemem += (curr_node->region_size *
					    PG_SIZE4K);
				if (curr_node->region_size > bigsize)
					bigsize = curr_node->region_size;
			}
		}
	}
	spin_unlock(&dmm_mgr->dmm_lock);
	printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
	       freemem / (1024 * 1024));
	printk(KERN_INFO "Total DSP VA USED memory = %d Mbytes\n",
	       (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
	printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes\n\n",
	       (bigsize * PG_SIZE4K / (1024 * 1024)));

	return 0;
}