/*
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP/BIOS Bridge resource allocation module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>

#include <dspbridge/node.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/nodepriv.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/resourcecleanup.h>
/* ----------------------------------- Defines, Data Structures, Typedefs */
struct drv_object {
	struct list_head dev_list;
	struct list_head dev_node_string;
};

/*
 *  This is the Device Extension. Named with the prefix DRV_ since it
 *  lives in this module.
 */
struct drv_ext {
	struct list_head link;
	char sz_string[MAXREGPATHLENGTH];
};

/* ----------------------------------- Globals */
static bool ext_phys_mem_pool_enabled;
struct ext_phys_mem_pool {
	u32 phys_mem_base;
	u32 phys_mem_size;
	u32 virt_mem_base;
	u32 next_phys_alloc_ptr;
};
static struct ext_phys_mem_pool ext_mem_pool;
/* ----------------------------------- Function Prototypes */
static int request_bridge_resources(struct cfg_hostres *res);
/* GPP PROCESS CLEANUP CODE */

static int drv_proc_free_node_res(int id, void *p, void *data);
/* Allocate and add a node resource element
 * This function is called from .Node_Allocate. */
int drv_insert_node_res_element(void *hnode, void *node_resource,
				void *process_ctxt)
{
	struct node_res_object **node_res_obj =
	    (struct node_res_object **)node_resource;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
	if (!*node_res_obj) {
		status = -ENOMEM;
		goto func_end;
	}

	(*node_res_obj)->node = hnode;
	retval = idr_get_new(ctxt->node_id, *node_res_obj,
			     &(*node_res_obj)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->node_id, *node_res_obj,
				     &(*node_res_obj)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EFAULT;
	}
func_end:
	if (status)
		kfree(*node_res_obj);

	return status;
}
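/*
 * Note on the IDR usage above: with the legacy IDR API, idr_get_new()
 * can fail with -EAGAIN when the IDR has no preallocated layer free to
 * hold the new id. idr_pre_get() preallocates that layer (returning 0
 * only on allocation failure), after which the idr_get_new() retry is
 * expected to succeed; any remaining error means the id space itself
 * is exhausted.
 */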
/* Release all Node resources and its context
 * Actual Node De-Allocation */
static int drv_proc_free_node_res(int id, void *p, void *data)
{
	struct process_context *ctxt = data;
	int status;
	struct node_res_object *node_res_obj = p;
	u32 node_state;

	if (node_res_obj->node_allocated) {
		node_state = node_get_state(node_res_obj->node);
		if (node_state <= NODE_DELETING) {
			if ((node_state == NODE_RUNNING) ||
			    (node_state == NODE_PAUSED) ||
			    (node_state == NODE_TERMINATING))
				node_terminate(node_res_obj->node, &status);

			node_delete(node_res_obj, ctxt);
		}
	}

	return 0;
}
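/*
 * A node is only torn down here if it still counts as allocated and its
 * state is at or before NODE_DELETING; nodes that are actively running,
 * paused, or terminating are first asked to terminate so that
 * node_delete() operates on a quiesced node.
 */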
/* Release all Mapped and Reserved DMM resources */
int drv_remove_all_dmm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	struct dmm_map_object *temp_map, *map_obj;
	struct dmm_rsv_object *temp_rsv, *rsv_obj;

	/* Free DMM mapped memory resources */
	list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
		status = proc_un_map(ctxt->processor,
				     (void *)map_obj->dsp_addr, ctxt);
		if (status)
			pr_err("%s: proc_un_map failed! status = 0x%x\n",
			       __func__, status);
	}

	/* Free DMM reserved memory resources */
	list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
		status = proc_un_reserve_memory(ctxt->processor, (void *)
						rsv_obj->dsp_reserved_addr,
						ctxt);
		if (status)
			pr_err("%s: proc_un_reserve_memory failed! status = 0x%x\n",
			       __func__, status);
	}

	return status;
}
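/*
 * Both loops use list_for_each_entry_safe() because proc_un_map() and
 * proc_un_reserve_memory() unlink and free the map/reservation objects
 * from ctxt->dmm_map_list and ctxt->dmm_rsv_list as a side effect; the
 * _safe variant keeps the iteration valid while the current entry
 * disappears from under it.
 */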
/* Update Node allocation status */
void drv_proc_node_update_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;

	node_res_obj->node_allocated = status;
}

/* Update Node Heap status */
void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;

	node_res_obj->heap_allocated = status;
}
/* Release all Node resources and its context
 * This is called from .bridge_release.
 */
int drv_remove_all_node_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
	idr_destroy(ctxt->node_id);

	return 0;
}
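/*
 * idr_for_each() walks every live id and applies the node cleanup
 * callback, so a process that exits without freeing its nodes still has
 * them deleted; idr_destroy() then releases the IDR's internal memory.
 * The stream cleanup below follows the same two-step pattern.
 */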
/* Allocate the STRM resource element
 * This is called after the actual resource is allocated
 */
int drv_proc_insert_strm_res_element(void *stream_obj,
				     void *strm_res, void *process_ctxt)
{
	struct strm_res_object **pstrm_res =
	    (struct strm_res_object **)strm_res;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
	if (*pstrm_res == NULL) {
		status = -EFAULT;
		goto func_end;
	}

	(*pstrm_res)->stream = stream_obj;
	retval = idr_get_new(ctxt->stream_id, *pstrm_res,
			     &(*pstrm_res)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->stream_id, *pstrm_res,
				     &(*pstrm_res)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EPERM;
	}

func_end:
	return status;
}
static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;
	struct strm_res_object *strm_res = p;
	struct stream_info strm_info;
	struct dsp_streaminfo user;
	u8 **ap_buffer = NULL;
	u8 *buf_ptr;
	u32 ul_bytes;
	u32 dw_arg;
	s32 ul_buf_size;

	if (strm_res->num_bufs) {
		ap_buffer = kmalloc((strm_res->num_bufs *
				     sizeof(u8 *)), GFP_KERNEL);
		if (ap_buffer) {
			strm_free_buffer(strm_res, ap_buffer,
					 strm_res->num_bufs, ctxt);
			kfree(ap_buffer);
		}
	}
	strm_info.user_strm = &user;
	user.number_bufs_in_stream = 0;
	strm_get_info(strm_res->stream, &strm_info, sizeof(strm_info));
	while (user.number_bufs_in_stream--)
		strm_reclaim(strm_res->stream, &buf_ptr, &ul_bytes,
			     (u32 *) &ul_buf_size, &dw_arg);
	strm_close(strm_res, ctxt);

	return 0;
}
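/*
 * Cleanup order for a leaked stream: free any user buffers still
 * attached, then query the stream and reclaim every buffer that is
 * still queued on it (closing a stream with outstanding buffers would
 * fail), and only then close the stream itself.
 */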
/* Release all Stream resources and its context
 * This is called from .bridge_release.
 */
int drv_remove_all_strm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
	idr_destroy(ctxt->stream_id);

	return 0;
}
/* Updating the stream resource element */
int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
{
	struct strm_res_object **strm_res =
	    (struct strm_res_object **)strm_resources;

	(*strm_res)->num_bufs = num_bufs;

	return 0;
}

/* GPP PROCESS CLEANUP CODE END */
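/*
 * Taken together, the routines above form the GPP-side cleanup path.
 * On process release the call sequence is expected to look roughly
 * like this (a sketch; the local names are illustrative, the caller
 * being the driver's release file operation):
 *
 *	struct process_context *pr_ctxt = fp->private_data;
 *
 *	drv_remove_all_strm_res_elements(pr_ctxt);
 *	drv_remove_all_node_res_elements(pr_ctxt);
 *	drv_remove_all_dmm_res_elements(pr_ctxt);
 *
 * Streams are cleaned first (they may hold node buffers), then nodes,
 * then any remaining DMM mappings and reservations.
 */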
/*
 *  ======== drv_create ========
 *  Purpose:
 *      DRV Object gets created only once during Driver Loading.
 */
int drv_create(struct drv_object **drv_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
	if (pdrv_object) {
		/* Create and Initialize List of device objects */
		INIT_LIST_HEAD(&pdrv_object->dev_list);
		INIT_LIST_HEAD(&pdrv_object->dev_node_string);
	} else {
		status = -ENOMEM;
	}

	/* Store the DRV Object in the driver data */
	if (!status) {
		if (drv_datap) {
			drv_datap->drv_object = (void *)pdrv_object;
		} else {
			status = -EPERM;
			pr_err("%s: Failed to store DRV object\n", __func__);
		}
	}

	if (!status) {
		*drv_obj = pdrv_object;
	} else {
		/* Free the DRV Object */
		kfree(pdrv_object);
	}

	return status;
}
/*
 *  ======== drv_destroy ========
 *  Purpose:
 *      Invoked during bridge de-initialization
 */
int drv_destroy(struct drv_object *driver_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	kfree(pdrv_object);
	/* Update the DRV Object in the driver data */
	if (drv_datap) {
		drv_datap->drv_object = NULL;
	} else {
		status = -EPERM;
		pr_err("%s: Failed to clear DRV object\n", __func__);
	}

	return status;
}
/*
 *  ======== drv_get_dev_object ========
 *  Purpose:
 *      Given an index, returns a handle to DevObject from the list.
 */
int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
		       struct dev_object **device_obj)
{
	int status = 0;
	struct dev_object *dev_obj;
	u32 i;

	dev_obj = (struct dev_object *)drv_get_first_dev_object();
	for (i = 0; i < index; i++) {
		dev_obj = (struct dev_object *)
		    drv_get_next_dev_object((u32) dev_obj);
	}
	if (dev_obj) {
		*device_obj = (struct dev_object *)dev_obj;
	} else {
		*device_obj = NULL;
		status = -EPERM;
	}

	return status;
}
/*
 *  ======== drv_get_first_dev_object ========
 *  Purpose:
 *      Retrieve the first Device Object handle from an internal linked
 *      list of DEV_OBJECTs maintained by DRV.
 */
u32 drv_get_first_dev_object(void)
{
	u32 dw_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_list))
			dw_dev_object = (u32) pdrv_obj->dev_list.next;
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_object;
}
/*
 *  ======== drv_get_first_dev_extension ========
 *  Purpose:
 *      Retrieve the first Device Extension from an internal linked list
 *      of pointers to dev_node strings maintained by DRV.
 */
u32 drv_get_first_dev_extension(void)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_node_string)) {
			dw_dev_extension =
			    (u32) pdrv_obj->dev_node_string.next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}
/*
 *  ======== drv_get_next_dev_object ========
 *  Purpose:
 *      Retrieve the next Device Object handle from an internal linked
 *      list of DEV_OBJECTs maintained by DRV, after having previously
 *      called drv_get_first_dev_object() and zero or more
 *      drv_get_next_dev_object().
 */
u32 drv_get_next_dev_object(u32 hdev_obj)
{
	u32 dw_next_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	struct list_head *curr;

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_list)) {
			curr = (struct list_head *)hdev_obj;
			if (list_is_last(curr, &pdrv_obj->dev_list))
				return 0;
			dw_next_dev_object = (u32) curr->next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_next_dev_object;
}
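/*
 * Note that the "handles" used by these iterators are raw pointers: a
 * dev_object handle is really the struct list_head at the start of
 * struct dev_object, cast to u32. That is also why
 * drv_insert_dev_object() below can cast hdev_obj straight to a
 * struct list_head pointer; the scheme only works because the link is
 * the first member of the object.
 */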
/*
 *  ======== drv_get_next_dev_extension ========
 *  Purpose:
 *      Retrieve the next Device Extension from an internal linked list
 *      of pointers to dev_node strings maintained by DRV, after having
 *      previously called drv_get_first_dev_extension() and zero or more
 *      drv_get_next_dev_extension().
 */
u32 drv_get_next_dev_extension(u32 dev_extension)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	struct list_head *curr;

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_node_string)) {
			curr = (struct list_head *)dev_extension;
			if (list_is_last(curr, &pdrv_obj->dev_node_string))
				return 0;
			dw_dev_extension = (u32) curr->next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}
/*
 *  ======== drv_insert_dev_object ========
 *  Purpose:
 *      Insert a DevObject into the list of Manager object.
 */
int drv_insert_dev_object(struct drv_object *driver_obj,
			  struct dev_object *hdev_obj)
{
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

	list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list);

	return 0;
}
/*
 *  ======== drv_remove_dev_object ========
 *  Purpose:
 *      Search for and remove a DeviceObject from the given list of DRV
 *      objects.
 */
int drv_remove_dev_object(struct drv_object *driver_obj,
			  struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct list_head *cur_elem;

	/* Search list for p_proc_object: */
	list_for_each(cur_elem, &pdrv_object->dev_list) {
		/* If found, remove it. */
		if ((struct dev_object *)cur_elem == hdev_obj) {
			list_del(cur_elem);
			status = 0;
			break;
		}
	}

	return status;
}
/*
 *  ======== drv_request_resources ========
 *  Purpose:
 *      Requests resources from the OS.
 */
int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
{
	int status = 0;
	struct drv_object *pdrv_object;
	struct drv_ext *pszdev_node;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/*
	 *  Allocate memory to hold the string. This will live until
	 *  it is freed in the Release resources. Update the driver object
	 *  list.
	 */
	if (!drv_datap || !drv_datap->drv_object)
		status = -ENODATA;
	else
		pdrv_object = drv_datap->drv_object;

	if (!status) {
		pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
		if (pszdev_node) {
			strncpy(pszdev_node->sz_string,
				(char *)dw_context, MAXREGPATHLENGTH - 1);
			pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
			/* Update the Driver Object List */
			*dev_node_strg = (u32) pszdev_node->sz_string;
			list_add_tail(&pszdev_node->link,
				      &pdrv_object->dev_node_string);
		} else {
			status = -ENOMEM;
			*dev_node_strg = 0;
		}
	} else {
		dev_dbg(bridge, "%s: Failed to get Driver Object from Registry",
			__func__);
		*dev_node_strg = 0;
	}

	return status;
}
/*
 *  ======== drv_release_resources ========
 *  Purpose:
 *      Releases resources from the OS.
 */
int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
{
	int status = 0;
	struct drv_ext *pszdev_node;

	/*
	 *  Irrespective of the status, go ahead and clean up.
	 *  The following will overwrite the status.
	 */
	for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
	     pszdev_node != NULL; pszdev_node = (struct drv_ext *)
	     drv_get_next_dev_extension((u32) pszdev_node)) {
		if ((u32) pszdev_node == dw_context) {
			/* Delete from the Driver object list */
			list_del(&pszdev_node->link);
			kfree(pszdev_node);
			break;
		}
	}

	return status;
}
/*
 *  ======== request_bridge_resources ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
static int request_bridge_resources(struct cfg_hostres *res)
{
	struct cfg_hostres *host_res = res;

	/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
	host_res->num_mem_windows = 2;

	/* First window is for DSP internal memory */
	dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
	dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
	dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

	/* for 24xx base port is not mapping the memory for DSP
	 * internal memory TODO Do a ioremap here */
	/* Second window is for DSP external memory shared with MPU */

	/* These are hard-coded values */
	host_res->birq_registers = 0;
	host_res->birq_attrib = 0;
	host_res->offset_for_monitor = 0;
	host_res->chnl_offset = 0;
	/* CHNL_MAXCHANNELS */
	host_res->num_chnls = CHNL_MAXCHANNELS;
	host_res->chnl_buf_size = 0x400;

	return 0;
}
/*
 *  ======== drv_request_bridge_res_dsp ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
int drv_request_bridge_res_dsp(void **phost_resources)
{
	int status = 0;
	struct cfg_hostres *host_res;
	u32 dw_buff_size;
	u32 dma_addr;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	dw_buff_size = sizeof(struct cfg_hostres);

	host_res = kzalloc(dw_buff_size, GFP_KERNEL);

	if (host_res != NULL) {
		request_bridge_resources(host_res);
		/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
		host_res->num_mem_windows = 4;

		host_res->mem_base[0] = 0;
		host_res->mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
						      OMAP_DSP_MEM1_SIZE);
		host_res->mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
						      OMAP_DSP_MEM2_SIZE);
		host_res->mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
						      OMAP_DSP_MEM3_SIZE);
		host_res->per_base = ioremap(OMAP_PER_CM_BASE,
					     OMAP_PER_CM_SIZE);
		host_res->per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
						      OMAP_PER_PRM_SIZE);
		host_res->core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
						       OMAP_CORE_PRM_SIZE);
		host_res->dmmu_base = ioremap(OMAP_DMMU_BASE,
					      OMAP_DMMU_SIZE);

		dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
		dev_dbg(bridge, "mem_base[1] 0x%x\n", host_res->mem_base[1]);
		dev_dbg(bridge, "mem_base[2] 0x%x\n", host_res->mem_base[2]);
		dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
		dev_dbg(bridge, "mem_base[4] 0x%x\n", host_res->mem_base[4]);
		dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			/* Allocate physically contiguous,
			 * non-cacheable memory */
			host_res->mem_base[1] =
			    (u32) mem_alloc_phys_mem(shm_size, 0x100000,
						     &dma_addr);
			if (host_res->mem_base[1] == 0) {
				status = -ENOMEM;
				pr_err("shm reservation Failed\n");
			} else {
				host_res->mem_length[1] = shm_size;
				host_res->mem_phys[1] = dma_addr;

				dev_dbg(bridge, "%s: Bridge shm address 0x%x dma_addr %x size %x\n",
					__func__, host_res->mem_base[1],
					dma_addr, shm_size);
			}
		}
		if (!status) {
			/* These are hard-coded values */
			host_res->birq_registers = 0;
			host_res->birq_attrib = 0;
			host_res->offset_for_monitor = 0;
			host_res->chnl_offset = 0;
			/* CHNL_MAXCHANNELS */
			host_res->num_chnls = CHNL_MAXCHANNELS;
			host_res->chnl_buf_size = 0x400;
			dw_buff_size = sizeof(struct cfg_hostres);
		}
		*phost_resources = host_res;
	}

	return status;
}
void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
{
	u32 pool_virt_base;

	/* get the virtual address for the physical memory pool passed */
	pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);

	if (!pool_virt_base) {
		pr_err("%s: external physical memory map failed\n", __func__);
		ext_phys_mem_pool_enabled = false;
	} else {
		ext_mem_pool.phys_mem_base = pool_phys_base;
		ext_mem_pool.phys_mem_size = pool_size;
		ext_mem_pool.virt_mem_base = pool_virt_base;
		ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
		ext_phys_mem_pool_enabled = true;
	}
}
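/*
 * The pool base and size are typically a block of physical RAM carved
 * out at boot (e.g. via a driver parameter) so it never enters the
 * kernel's own allocator. A usage sketch with illustrative values:
 *
 *	mem_ext_phys_pool_init(0x87000000, 0x600000);
 *
 * After a successful ioremap() the whole pool is handed to the simple
 * bump allocator below.
 */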
void mem_ext_phys_pool_release(void)
{
	if (ext_phys_mem_pool_enabled) {
		iounmap((void *)(ext_mem_pool.virt_mem_base));
		ext_phys_mem_pool_enabled = false;
	}
}
/*
 *  ======== mem_ext_phys_mem_alloc ========
 *  Purpose:
 *      Allocate physically contiguous, uncached memory from the
 *      external memory pool.
 */
static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
{
	u32 new_alloc_ptr;
	u32 offset;
	u32 virt_addr;

	if (align == 0)
		align = 1;

	if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
		     - ext_mem_pool.next_phys_alloc_ptr)) {
		*phys_addr = 0;
		return NULL;
	}

	offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
	if (offset == 0)
		new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
	else
		new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
		    (align - offset);

	if ((new_alloc_ptr + bytes) <=
	    (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
		/* we can allocate */
		*phys_addr = new_alloc_ptr;
		ext_mem_pool.next_phys_alloc_ptr = new_alloc_ptr + bytes;
		virt_addr = ext_mem_pool.virt_mem_base +
		    (new_alloc_ptr - ext_mem_pool.phys_mem_base);
		return (void *)virt_addr;
	}

	*phys_addr = 0;
	return NULL;
}
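/*
 * The bump allocator rounds next_phys_alloc_ptr up to the requested
 * power-of-two alignment. Worked example with illustrative values:
 * next_phys_alloc_ptr = 0x87001804 and align = 0x1000 give
 * offset = 0x87001804 & 0xfff = 0x804, so the block starts at
 * 0x87001804 + (0x1000 - 0x804) = 0x87002000. Note there is no
 * per-block free routine: the pool only ever grows until it is
 * released as a whole by mem_ext_phys_pool_release().
 */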
/*
 *  ======== mem_alloc_phys_mem ========
 *  Purpose:
 *      Allocate physically contiguous, uncached memory.
 */
void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
			 u32 *physical_address)
{
	void *va_mem = NULL;
	dma_addr_t pa_mem;

	if (byte_size > 0) {
		if (ext_phys_mem_pool_enabled)
			va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
							(u32 *) &pa_mem);
		else
			va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
						    GFP_KERNEL);
		if (va_mem == NULL)
			*physical_address = 0;
		else
			*physical_address = pa_mem;
	}

	return va_mem;
}
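/*
 * When the external pool is disabled, allocation falls back to
 * dma_alloc_coherent() with a NULL device, which kernels of this era
 * accepted as "no specific device". mem_free_phys_mem() below mirrors
 * that split: pool memory is never freed individually, so only the
 * coherent-DMA case needs dma_free_coherent().
 */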
/*
 *  ======== mem_free_phys_mem ========
 *  Purpose:
 *      Free the given block of physically contiguous memory.
 */
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
		       u32 byte_size)
{
	if (!ext_phys_mem_pool_enabled)
		dma_free_coherent(NULL, byte_size, virtual_address,
				  physical_address);
}