/*
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Communication (Shared) Memory Management (CMM) module provides
 * shared memory management services for DSP/BIOS Bridge data streaming
 * and messaging.
 *
 * Multiple shared memory segments can be registered with CMM.
 * Each registered SM segment is represented by a SM "allocator" that
 * describes a block of physically contiguous shared memory used for
 * future allocations by CMM.
 *
 * Memory is coalesced back to the appropriate heap when a buffer is
 * freed.
 *
 * Notes:
 *   Va: Virtual address.
 *   Pa: Physical or kernel system address.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/*  ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/*  ----------------------------------- This */
#include <dspbridge/cmm.h>
/*  ----------------------------------- Defines, Data Structures, Typedefs */
#define NEXT_PA(pnode)	(pnode->pa + pnode->size)

/* Other bus/platform translations */
#define DSPPA2GPPPA(base, x, y)	((x) + (y))
#define GPPPA2DSPPA(base, x, y)	((x) - (y))
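
/*
 * Illustrative note (not from the original source): cmm_xlator_translate()
 * below passes dsp_phys_addr_offset scaled by the registered c_factor as the
 * 'y' argument of these macros. Assuming, hypothetically, an offset of
 * 0x100000 and a c_factor of CMM_ADDTODSPPA evaluating to +1:
 *
 *	dsp_pa = GPPPA2DSPPA(base, gpp_pa, 0x100000 * 1);  // gpp_pa - 0x100000
 *	gpp_pa = DSPPA2GPPPA(base, dsp_pa, 0x100000 * 1);  // dsp_pa + 0x100000
 *
 * The 'base' argument is accepted but unused by both macros.
 */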
/*
 *  Allocators define a block of contiguous memory used for future allocations.
 *
 *      sma - shared memory allocator.
 *      vma - virtual memory allocator. (not used)
 */
struct cmm_allocator {		/* sma */
	unsigned int shm_base;	/* Start of physical SM block */
	u32 sm_size;		/* Size of SM block in bytes */
	unsigned int vm_base;	/* Start of VM block. (Dev driver
				 * context for 'sma') */
	u32 dsp_phys_addr_offset;	/* DSP PA to GPP PA offset for this
					 * SM space */
	s8 c_factor;		/* DSPPa to GPPPa Conversion Factor */
	unsigned int dsp_base;	/* DSP virt base byte address */
	u32 dsp_size;		/* DSP seg size in bytes */
	struct cmm_object *cmm_mgr;	/* back ref to parent mgr */
	/* node list of available memory */
	struct list_head free_list;
	/* node list of memory in use */
	struct list_head in_use_list;
};
struct cmm_xlator {		/* Pa<->Va translator object */
	/* CMM object this translator is associated with */
	struct cmm_object *cmm_mgr;
	/*
	 *  Client process virtual base address that corresponds to phys SM
	 *  base address for translator's seg_id.
	 *  Only 1 segment ID currently supported.
	 */
	unsigned int virt_base;	/* virtual base address */
	u32 virt_size;		/* size of virt space in bytes */
	u32 seg_id;		/* Segment Id */
};

/* CMM Mgr */
struct cmm_object {
	/*
	 * Cmm Lock is used to serialize access mem manager for multi-threads.
	 */
	struct mutex cmm_lock;	/* Lock to access cmm mgr */
	struct list_head node_free_list;	/* Free list of memory nodes */
	u32 min_block_size;	/* Min SM block; default 16 bytes */
	u32 page_size;		/* Memory Page size (1k/4k) */
	/* GPP SM segment ptrs */
	struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};
/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
	/* min_block_size, min block size(bytes) allocated by cmm mgr */
	16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
	1		/* seg_id, default segment Id for allocator */
};

/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
	/* seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
	1,
	0,			/* dsp_bufs */
	0,			/* dsp_buf_size */
	NULL,			/* vm_base */
	0,			/* vm_size */
};
/* SM node representing a block of memory. */
struct cmm_mnode {
	struct list_head link;	/* must be 1st element */
	u32 pa;			/* Phys addr */
	u32 va;			/* Virtual address in device process context */
	u32 size;		/* SM block size in bytes */
	u32 client_proc;	/* Process that allocated this mem block */
};
/*  ----------------------------------- Globals */
static u32 refs;		/* module reference count */
/*  ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size);
/* get available slot for new allocator */
static s32 get_slot(struct cmm_object *cmm_mgr_obj);
static void un_register_gppsm_seg(struct cmm_allocator *psma);
/*
 *  ======== cmm_calloc_buf ========
 *  Allocate a SM buffer, zero contents, and return the physical address
 *  and optional driver context virtual address (pp_buf_va).
 *
 *  The freelist is sorted in increasing size order. Get the first block
 *  that satisfies the request and return the unused remainder to the
 *  freelist if it is large enough. The kept block is placed on the
 *  in-use list.
 */
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
		     struct cmm_attrs *pattrs, void **pp_buf_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	void *buf_pa = NULL;
	struct cmm_mnode *pnode = NULL;
	struct cmm_mnode *new_node = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 delta_size;
	u8 *pbyte = NULL;
	s32 cnt;

	if (pattrs == NULL)
		pattrs = &cmm_dfltalctattrs;

	if (pp_buf_va != NULL)
		*pp_buf_va = NULL;

	if (cmm_mgr_obj && (usize != 0)) {
		if (pattrs->seg_id > 0) {
			/* SegId > 0 is SM */
			/* get the allocator object for this segment id */
			allocator =
			    get_allocator(cmm_mgr_obj, pattrs->seg_id);
			/* keep block size a multiple of min_block_size */
			usize =
			    ((usize - 1) & ~(cmm_mgr_obj->min_block_size - 1))
			    + cmm_mgr_obj->min_block_size;
			mutex_lock(&cmm_mgr_obj->cmm_lock);
			pnode = get_free_block(allocator, usize);

			if (pnode) {
				delta_size = (pnode->size - usize);
				if (delta_size >= cmm_mgr_obj->min_block_size) {
					/* create a new block with the leftovers
					 * and add it to the freelist */
					new_node =
					    get_node(cmm_mgr_obj,
						     pnode->pa + usize,
						     pnode->va + usize,
						     (u32) delta_size);
					/* leftovers go free */
					add_to_free_list(allocator, new_node);
					/* adjust our node's size */
					pnode->size = usize;
				}
				/* Tag node with client process requesting
				 * allocation. We'll need to free up a
				 * process's alloc'd SM if the client process
				 * goes away.
				 */
				/* Return TGID instead of process handle */
				pnode->client_proc = current->tgid;

				/* put our node on InUse list */
				list_add_tail(&pnode->link,
					      &allocator->in_use_list);
				buf_pa = (void *)pnode->pa;	/* physical address */
				/* clear mem */
				pbyte = (u8 *) pnode->va;
				for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
					*pbyte = 0;

				if (pp_buf_va != NULL) {
					/* Virtual address */
					*pp_buf_va = (void *)pnode->va;
				}
			}
			mutex_unlock(&cmm_mgr_obj->cmm_lock);
		}
	}
	return buf_pa;
}
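
/*
 * Illustrative usage sketch (not part of the driver): a client holding a
 * processor handle could allocate and release one SM buffer roughly as
 * follows; variable names are hypothetical.
 *
 *	struct cmm_object *hcmm_mgr;
 *	void *buf_pa, *buf_va;
 *
 *	if (!cmm_get_handle(hprocessor, &hcmm_mgr)) {
 *		buf_pa = cmm_calloc_buf(hcmm_mgr, 256, NULL, &buf_va);
 *		if (buf_pa)
 *			cmm_free_buf(hcmm_mgr, buf_pa, 0);
 *	}
 *
 * A NULL attrs pointer selects cmm_dfltalctattrs (segment 1); passing
 * ul_seg_id == 0 to cmm_free_buf() applies the same default on free.
 */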
/*
 *  ======== cmm_create ========
 *  Create a communication memory manager object.
 */
int cmm_create(struct cmm_object **ph_cmm_mgr,
	       struct dev_object *hdev_obj,
	       const struct cmm_mgrattrs *mgr_attrts)
{
	struct cmm_object *cmm_obj = NULL;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);

	*ph_cmm_mgr = NULL;
	/* create, zero, and tag a cmm mgr object */
	cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
	if (!cmm_obj)
		return -ENOMEM;

	if (mgr_attrts == NULL)
		mgr_attrts = &cmm_dfltmgrattrs;	/* set defaults */

	/* 4 bytes minimum */
	DBC_ASSERT(mgr_attrts->min_block_size >= 4);
	/* save away smallest block allocation for this cmm mgr */
	cmm_obj->min_block_size = mgr_attrts->min_block_size;
	cmm_obj->page_size = PAGE_SIZE;

	/* create node free list */
	INIT_LIST_HEAD(&cmm_obj->node_free_list);
	mutex_init(&cmm_obj->cmm_lock);
	*ph_cmm_mgr = cmm_obj;

	return status;
}
/*
 *  ======== cmm_destroy ========
 *  Release the communication memory manager resources.
 */
int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_info temp_info;
	int status = 0;
	s32 slot_seg;
	struct cmm_mnode *node, *tmp;

	DBC_REQUIRE(refs > 0);
	if (!hcmm_mgr)
		return -EFAULT;

	mutex_lock(&cmm_mgr_obj->cmm_lock);
	/* If not force then fail if outstanding allocations exist */
	if (!force) {
		/* Check for outstanding memory allocations */
		status = cmm_get_info(hcmm_mgr, &temp_info);
		if (!status) {
			if (temp_info.total_in_use_cnt > 0) {
				/* outstanding allocations */
				status = -EPERM;
			}
		}
	}
	if (!status) {
		/* UnRegister SM allocator */
		for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
			if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
				un_register_gppsm_seg
				    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
				/* Set slot to NULL for future reuse */
				cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
			}
		}
	}
	list_for_each_entry_safe(node, tmp, &cmm_mgr_obj->node_free_list,
				 link) {
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	if (!status) {
		/* delete CS & cmm mgr object */
		mutex_destroy(&cmm_mgr_obj->cmm_lock);
		kfree(cmm_mgr_obj);
	}
	return status;
}
/*
 *  ======== cmm_exit ========
 *  Discontinue usage of module; free resources when reference count
 *  reaches 0.
 */
void cmm_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;
}
/*
 *  ======== cmm_free_buf ========
 *  Free the given buffer.
 */
int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = -EFAULT;
	struct cmm_mnode *curr, *tmp;
	struct cmm_allocator *allocator;
	struct cmm_attrs *pattrs;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_pa != NULL);

	if (ul_seg_id == 0) {
		pattrs = &cmm_dfltalctattrs;
		ul_seg_id = pattrs->seg_id;
	}
	if (!hcmm_mgr || !(ul_seg_id > 0))
		return -EFAULT;

	allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
	if (!allocator)
		return status;

	mutex_lock(&cmm_mgr_obj->cmm_lock);
	list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) {
		if (curr->pa == (u32) buf_pa) {
			list_del(&curr->link);
			add_to_free_list(allocator, curr);
			status = 0;
			break;
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);

	return status;
}
/*
 *  ======== cmm_get_handle ========
 *  Return the communication memory manager object for this device.
 *  This is typically called from the client process.
 */
int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
	int status = 0;
	struct dev_object *hdev_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);
	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (!status)
		status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);

	return status;
}
/*
 *  ======== cmm_get_info ========
 *  Return the current memory utilization information.
 */
int cmm_get_info(struct cmm_object *hcmm_mgr,
		 struct cmm_info *cmm_info_obj)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	u32 ul_seg;
	int status = 0;
	struct cmm_allocator *altr;
	struct cmm_mnode *curr;

	DBC_REQUIRE(cmm_info_obj != NULL);

	if (!hcmm_mgr)
		return -EFAULT;

	mutex_lock(&cmm_mgr_obj->cmm_lock);
	cmm_info_obj->num_gppsm_segs = 0;	/* # of SM segments */
	/* Total # of outstanding alloc */
	cmm_info_obj->total_in_use_cnt = 0;
	/* min block size */
	cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size;
	/* check SM memory segments */
	for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
		/* get the allocator object for this segment id */
		altr = get_allocator(cmm_mgr_obj, ul_seg);
		if (!altr)
			continue;
		cmm_info_obj->num_gppsm_segs++;
		cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
		    altr->shm_base - altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].total_seg_size =
		    altr->dsp_size + altr->sm_size;
		cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
		    altr->shm_base;
		cmm_info_obj->seg_info[ul_seg - 1].gpp_size =
		    altr->sm_size;
		cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
		    altr->dsp_base;
		cmm_info_obj->seg_info[ul_seg - 1].dsp_size =
		    altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].seg_base_va =
		    altr->vm_base - altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0;

		list_for_each_entry(curr, &altr->in_use_list, link) {
			cmm_info_obj->total_in_use_cnt++;
			cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++;
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);

	return status;
}
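
/*
 * Illustrative sketch (not part of the driver): cmm_destroy() relies on this
 * query to refuse a non-forced teardown while buffers are still outstanding;
 * a caller could perform the same check, e.g.:
 *
 *	struct cmm_info info;
 *
 *	if (!cmm_get_info(hcmm_mgr, &info) && info.total_in_use_cnt > 0)
 *		pr_info("%u SM blocks still in use\n", info.total_in_use_cnt);
 *
 * The pr_info() line is for illustration only.
 */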
/*
 *  ======== cmm_init ========
 *  Initializes private state of CMM module.
 */
bool cmm_init(void)
{
	bool ret = true;

	DBC_REQUIRE(refs >= 0);
	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	return ret;
}
/*
 *  ======== cmm_register_gppsm_seg ========
 *  Register a block of SM with the CMM to be used for later GPP SM
 *  allocations.
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			   u32 dw_gpp_base_pa, u32 ul_size,
			   u32 dsp_addr_offset, s8 c_factor,
			   u32 dw_dsp_base, u32 ul_dsp_size,
			   u32 *sgmt_id, u32 gpp_base_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_allocator *psma = NULL;
	int status = 0;
	struct cmm_mnode *new_node;
	s32 slot_seg;

	DBC_REQUIRE(ul_size > 0);
	DBC_REQUIRE(sgmt_id != NULL);
	DBC_REQUIRE(dw_gpp_base_pa != 0);
	DBC_REQUIRE(gpp_base_va != 0);
	DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
		    (c_factor >= CMM_SUBFROMDSPPA));

	dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
		"dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
		__func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
		dw_dsp_base, ul_dsp_size, gpp_base_va);

	if (!hcmm_mgr)
		return -EFAULT;

	/* make sure we have room for another allocator */
	mutex_lock(&cmm_mgr_obj->cmm_lock);

	slot_seg = get_slot(cmm_mgr_obj);
	if (slot_seg < 0) {
		status = -EPERM;
		goto func_end;
	}

	/* Check if input ul_size is big enough to alloc at least one block */
	if (ul_size < cmm_mgr_obj->min_block_size) {
		status = -EINVAL;
		goto func_end;
	}

	/* create, zero, and tag an SM allocator object */
	psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
	if (!psma) {
		status = -ENOMEM;
		goto func_end;
	}

	psma->cmm_mgr = hcmm_mgr;	/* ref to parent */
	psma->shm_base = dw_gpp_base_pa;	/* SM Base phys */
	psma->sm_size = ul_size;	/* SM segment size in bytes */
	psma->vm_base = gpp_base_va;
	psma->dsp_phys_addr_offset = dsp_addr_offset;
	psma->c_factor = c_factor;
	psma->dsp_base = dw_dsp_base;
	psma->dsp_size = ul_dsp_size;
	if (psma->vm_base == 0) {
		status = -EPERM;
		goto func_end;
	}
	/* return the actual segment identifier */
	*sgmt_id = (u32) slot_seg + 1;

	INIT_LIST_HEAD(&psma->free_list);
	INIT_LIST_HEAD(&psma->in_use_list);

	/* Get a mem node for this hunk-o-memory */
	new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
			    psma->vm_base, ul_size);
	/* Place node on the SM allocator's free list */
	if (new_node) {
		list_add_tail(&new_node->link, &psma->free_list);
	} else {
		status = -ENOMEM;
		goto func_end;
	}
	/* make entry */
	cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
	/* Cleanup allocator */
	if (status && psma)
		un_register_gppsm_seg(psma);
	mutex_unlock(&cmm_mgr_obj->cmm_lock);

	return status;
}
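
/*
 * Illustrative lifecycle sketch (hypothetical values, not part of the
 * driver): the bridge creates a manager, registers one GPP SM segment,
 * and later tears everything down again.
 *
 *	struct cmm_object *hcmm_mgr;
 *	u32 seg_id;
 *
 *	if (!cmm_create(&hcmm_mgr, hdev_obj, NULL)) {
 *		cmm_register_gppsm_seg(hcmm_mgr, dw_gpp_base_pa, ul_size,
 *				       dsp_addr_offset, c_factor, dw_dsp_base,
 *				       ul_dsp_size, &seg_id, gpp_base_va);
 *		// ... allocations via cmm_calloc_buf()/cmm_free_buf() ...
 *		cmm_un_register_gppsm_seg(hcmm_mgr, CMM_ALLSEGMENTS);
 *		cmm_destroy(hcmm_mgr, false);
 *	}
 */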
/*
 *  ======== cmm_un_register_gppsm_seg ========
 *  UnRegister GPP SM segments with the CMM.
 */
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			      u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = 0;
	struct cmm_allocator *psma;
	u32 ul_id = ul_seg_id;

	DBC_REQUIRE(ul_seg_id > 0);

	if (!hcmm_mgr)
		return -EFAULT;

	if (ul_seg_id == CMM_ALLSEGMENTS)
		ul_id = 1;

	if ((ul_id <= 0) || (ul_id > CMM_MAXGPPSEGS))
		return -EINVAL;

	/*
	 * FIXME: CMM_MAXGPPSEGS == 1. why use a while cycle? Seems to me like
	 * the ul_seg_id is not needed here. It must be always 1.
	 */
	while (ul_id <= CMM_MAXGPPSEGS) {
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		/* slot = seg_id - 1 */
		psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
		if (psma != NULL) {
			un_register_gppsm_seg(psma);
			/* Set alctr ptr to NULL for future reuse */
			cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1] = NULL;
		} else if (ul_seg_id != CMM_ALLSEGMENTS) {
			status = -EPERM;
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
		if (ul_seg_id != CMM_ALLSEGMENTS)
			break;

		ul_id++;
	}
	return status;
}
/*
 *  ======== un_register_gppsm_seg ========
 *  UnRegister the SM allocator by freeing all its resources and
 *  nulling cmm mgr table entry.
 *  Note: This routine is always called within the cmm lock critical section.
 */
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
	struct cmm_mnode *curr, *tmp;

	DBC_REQUIRE(psma != NULL);

	/* free nodes on free list */
	list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
		list_del(&curr->link);
		kfree(curr);
	}

	/* free nodes on InUse list */
	list_for_each_entry_safe(curr, tmp, &psma->in_use_list, link) {
		list_del(&curr->link);
		kfree(curr);
	}

	if ((void *)psma->vm_base != NULL)
		MEM_UNMAP_LINEAR_ADDRESS((void *)psma->vm_base);

	/* Free allocator itself */
	kfree(psma);
}
/*
 *  ======== get_slot ========
 *  An available slot # is returned. Returns negative on failure.
 */
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
	s32 slot_seg = -1;	/* neg on failure */

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	/* get first available slot in cmm mgr SMSegTab[] */
	for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
		if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
			break;
	}
	if (slot_seg == CMM_MAXGPPSEGS)
		slot_seg = -1;	/* failed */

	return slot_seg;
}
/*
 *  ======== get_node ========
 *  Get a memory node from the freelist or create a new one.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size)
{
	struct cmm_mnode *pnode;

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE(dw_pa != 0);
	DBC_REQUIRE(dw_va != 0);
	DBC_REQUIRE(ul_size != 0);

	/* Check cmm mgr's node freelist */
	if (list_empty(&cmm_mgr_obj->node_free_list)) {
		pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
		if (!pnode)
			return NULL;
	} else {
		/* surely a valid element */
		pnode = list_first_entry(&cmm_mgr_obj->node_free_list,
					 struct cmm_mnode, link);
		list_del_init(&pnode->link);
	}

	pnode->pa = dw_pa;
	pnode->va = dw_va;
	pnode->size = ul_size;

	return pnode;
}
/*
 *  ======== delete_node ========
 *  Put a memory node on the cmm nodelist for later use.
 *  Doesn't actually delete the node. Heap thrashing friendly.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
	DBC_REQUIRE(pnode != NULL);
	list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
}
/*
 *  ======== get_free_block ========
 *  Scan the free block list and return the first block that satisfies
 *  the requested size.
 */
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize)
{
	struct cmm_mnode *node, *tmp;

	if (!allocator)
		return NULL;

	list_for_each_entry_safe(node, tmp, &allocator->free_list, link) {
		if (usize <= node->size) {
			list_del(&node->link);
			return node;
		}
	}

	return NULL;
}
/*
 *  ======== add_to_free_list ========
 *  Coalesce the node into the freelist in ascending size order.
 */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *node)
{
	struct cmm_mnode *curr;

	if (!node) {
		pr_err("%s: failed - node is NULL\n", __func__);
		return;
	}

	list_for_each_entry(curr, &allocator->free_list, link) {
		if (NEXT_PA(curr) == node->pa) {
			curr->size += node->size;
			delete_node(allocator->cmm_mgr, node);
			return;
		}
		if (curr->pa == NEXT_PA(node)) {
			curr->pa = node->pa;
			curr->va = node->va;
			curr->size += node->size;
			delete_node(allocator->cmm_mgr, node);
			return;
		}
	}
	list_for_each_entry(curr, &allocator->free_list, link) {
		if (curr->size >= node->size) {
			list_add_tail(&node->link, &curr->link);
			return;
		}
	}
	list_add_tail(&node->link, &allocator->free_list);
}
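
/*
 * Worked example of the coalescing above (addresses hypothetical): with a
 * free block curr at pa 0x1000 and size 0x100, a freed node at pa 0x1100
 * satisfies NEXT_PA(curr) == node->pa (0x1000 + 0x100 == 0x1100), so curr
 * simply grows to size 0x200 and the spare descriptor is recycled through
 * delete_node(). The mirror case (curr->pa == NEXT_PA(node)) grows the
 * block downward by adopting the node's pa/va instead.
 */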
/*
 *  ======== get_allocator ========
 *  Return the allocator for the given SM Segid.
 *  SegIds: 1,2,3..max.
 */
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id)
{
	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));

	return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}
/*
 *  The CMM_Xlator[xxx] routines below are used by Node and Stream
 *  to perform SM address translation to the client process address space.
 *  A "translator" object is created by a node/stream for each SM seg used.
 */
/*
 *  ======== cmm_xlator_create ========
 *  Create an address translator object.
 */
int cmm_xlator_create(struct cmm_xlatorobject **xlator,
		      struct cmm_object *hcmm_mgr,
		      struct cmm_xlatorattrs *xlator_attrs)
{
	struct cmm_xlator *xlator_object = NULL;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(hcmm_mgr != NULL);

	*xlator = NULL;
	if (xlator_attrs == NULL)
		xlator_attrs = &cmm_dfltxlatorattrs;	/* set defaults */

	xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
	if (xlator_object != NULL) {
		xlator_object->cmm_mgr = hcmm_mgr;	/* ref back to CMM */
		/* SM seg_id */
		xlator_object->seg_id = xlator_attrs->seg_id;
	} else {
		status = -ENOMEM;
	}
	if (!status)
		*xlator = (struct cmm_xlatorobject *)xlator_object;

	return status;
}
/*
 *  ======== cmm_xlator_alloc_buf ========
 */
void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
			   u32 pa_size)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	void *pbuf = NULL;
	void *tmp_va_buff;
	struct cmm_attrs attrs;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(xlator_obj->cmm_mgr != NULL);
	DBC_REQUIRE(va_buf != NULL);
	DBC_REQUIRE(pa_size > 0);
	DBC_REQUIRE(xlator_obj->seg_id > 0);

	if (xlator_obj) {
		attrs.seg_id = xlator_obj->seg_id;
		__raw_writel(0, va_buf);
		/* Alloc SM */
		pbuf =
		    cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs, NULL);
		if (pbuf) {
			/* convert to translator (node/strm) process virtual
			 * address */
			tmp_va_buff = cmm_xlator_translate(xlator,
							   pbuf, CMM_PA2VA);
			__raw_writel((u32)tmp_va_buff, va_buf);
		}
	}
	return pbuf;
}
/*
 *  ======== cmm_xlator_free_buf ========
 *  Free the given SM buffer and descriptor.
 *  Does not free virtual memory.
 */
int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = -EPERM;
	void *buf_pa = NULL;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_va != NULL);
	DBC_REQUIRE(xlator_obj->seg_id > 0);

	if (xlator_obj) {
		/* convert Va to Pa so we can free it. */
		buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
		if (buf_pa) {
			status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa,
					      xlator_obj->seg_id);
			if (status) {
				/* Uh oh, this shouldn't happen. Descriptor
				 * gone! */
				DBC_ASSERT(false);	/* CMM is leaking mem */
			}
		}
	}
	return status;
}
/*
 *  ======== cmm_xlator_info ========
 *  Set/Get translator info.
 */
int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
		    u32 ul_size, u32 segm_id, bool set_info)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));

	if (xlator_obj) {
		if (set_info) {
			/* set translator's virtual address range */
			xlator_obj->virt_base = (u32) *paddr;
			xlator_obj->virt_size = ul_size;
		} else {	/* return virt base address */
			*paddr = (u8 *) xlator_obj->virt_base;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}
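
/*
 * Illustrative translator workflow (hypothetical names, not part of the
 * driver): a node/stream creates a translator, records the client-process
 * virtual range of the SM segment, then allocates through it.
 *
 *	struct cmm_xlatorobject *xlator;
 *	u8 *client_va_base;	// client-side VA of the mapped SM segment
 *	u32 va_slot;		// receives the translated buffer VA
 *	void *buf_pa;
 *
 *	if (!cmm_xlator_create(&xlator, hcmm_mgr, NULL)) {
 *		cmm_xlator_info(xlator, &client_va_base, seg_size, 1, true);
 *		buf_pa = cmm_xlator_alloc_buf(xlator, &va_slot, buf_size);
 *		// ...
 *		cmm_xlator_free_buf(xlator, (void *)va_slot);
 *	}
 */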
/*
 *  ======== cmm_xlator_translate ========
 */
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
			   enum cmm_xlatetype xtype)
{
	u32 dw_addr_xlate = 0;
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	struct cmm_object *cmm_mgr_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 dw_offset = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));

	if (!xlator_obj)
		return (void *)dw_addr_xlate;

	cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
	/* get this translator's default SM allocator */
	DBC_ASSERT(xlator_obj->seg_id > 0);
	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
	if (!allocator)
		return (void *)dw_addr_xlate;

	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
	    (xtype == CMM_PA2VA)) {
		if (xtype == CMM_PA2VA) {
			/* Gpp Va = Va Base + offset */
			dw_offset = (u8 *) paddr -
			    (u8 *) (allocator->shm_base -
				    allocator->dsp_size);
			dw_addr_xlate = xlator_obj->virt_base + dw_offset;
			/* Check if translated Va base is in range */
			if ((dw_addr_xlate < xlator_obj->virt_base) ||
			    (dw_addr_xlate >=
			     (xlator_obj->virt_base +
			      xlator_obj->virt_size))) {
				dw_addr_xlate = 0;	/* bad address */
			}
		} else {
			/* Gpp PA = Gpp Base + offset */
			dw_offset =
			    (u8 *) paddr - (u8 *) xlator_obj->virt_base;
			dw_addr_xlate =
			    allocator->shm_base - allocator->dsp_size +
			    dw_offset;
		}
	} else {
		dw_addr_xlate = (u32) paddr;
	}
	/* Now convert address to proper target physical address if needed */
	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
		/* Got Gpp Pa now, convert to DSP Pa */
		dw_addr_xlate =
		    GPPPA2DSPPA((allocator->shm_base - allocator->dsp_size),
				dw_addr_xlate,
				allocator->dsp_phys_addr_offset *
				allocator->c_factor);
	} else if (xtype == CMM_DSPPA2PA) {
		/* Got DSP Pa, convert to GPP Pa */
		dw_addr_xlate =
		    DSPPA2GPPPA(allocator->shm_base - allocator->dsp_size,
				dw_addr_xlate,
				allocator->dsp_phys_addr_offset *
				allocator->c_factor);
	}

	return (void *)dw_addr_xlate;
}
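
/*
 * Worked example of the translation above (all values hypothetical):
 * assume shm_base = 0x87000000, dsp_size = 0x100000, virt_base = 0x40001000,
 * dsp_phys_addr_offset = 0x07000000 and a c_factor of CMM_ADDTODSPPA
 * (taken here to evaluate to +1). For CMM_VA2DSPPA on paddr = 0x40002000:
 *
 *	dw_offset = 0x40002000 - 0x40001000           = 0x1000
 *	gpp pa    = (0x87000000 - 0x100000) + 0x1000  = 0x86f01000
 *	dsp pa    = 0x86f01000 - (0x07000000 * 1)     = 0x7ff01000
 */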