/*
 * drv.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP/BIOS Bridge resource allocation module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>

#include <dspbridge/node.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/nodepriv.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/resourcecleanup.h>
/* ----------------------------------- Defines, Data Structures, Typedefs */
struct drv_object {
	struct list_head dev_list;
	struct list_head dev_node_string;
};

/*
 *  This is the Device Extension. Named with the prefix
 *  DRV_ since it lives in this module.
 */
struct drv_ext {
	struct list_head link;
	char sz_string[MAXREGPATHLENGTH];
};
/* ----------------------------------- Globals */
static s32 refs;
static bool ext_phys_mem_pool_enabled;
struct ext_phys_mem_pool {
	u32 phys_mem_base;
	u32 phys_mem_size;
	u32 virt_mem_base;
	u32 next_phys_alloc_ptr;
};
static struct ext_phys_mem_pool ext_mem_pool;
/* ----------------------------------- Function Prototypes */
static int request_bridge_resources(struct cfg_hostres *res);
/* GPP PROCESS CLEANUP CODE */

static int drv_proc_free_node_res(int id, void *p, void *data);
/* Allocate and add a node resource element.
 * This function is called from node_allocate(). */
int drv_insert_node_res_element(void *hnode, void *node_resource,
				void *process_ctxt)
{
	struct node_res_object **node_res_obj =
	    (struct node_res_object **)node_resource;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
	if (!*node_res_obj) {
		status = -ENOMEM;
		goto func_end;
	}

	(*node_res_obj)->node = hnode;
	retval = idr_get_new(ctxt->node_id, *node_res_obj,
			     &(*node_res_obj)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->node_id, *node_res_obj,
				     &(*node_res_obj)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -ENOMEM;
	}
func_end:
	if (status)
		kfree(*node_res_obj);

	return status;
}
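
/*
 * The -EAGAIN handling above is the pre-idr_alloc() IDR idiom:
 * idr_get_new() fails with -EAGAIN when the IDR's preallocated layer
 * cache is empty, idr_pre_get() refills that cache (it may sleep with
 * GFP_KERNEL), and the insertion is then retried once.  Kernels that
 * provide idr_alloc() collapse the whole dance into a single call,
 * roughly idr_alloc(ctxt->node_id, *node_res_obj, 0, 0, GFP_KERNEL);
 * that is shown only as a sketch, since this driver predates the API.
 */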
/* Release a node resource element and its context.
 * This performs the actual node de-allocation. */
static int drv_proc_free_node_res(int id, void *p, void *data)
{
	struct process_context *ctxt = data;
	int status;
	struct node_res_object *node_res_obj = p;
	u32 node_state;

	if (node_res_obj->node_allocated) {
		node_state = node_get_state(node_res_obj->node);
		if (node_state <= NODE_DELETING) {
			if ((node_state == NODE_RUNNING) ||
			    (node_state == NODE_PAUSED) ||
			    (node_state == NODE_TERMINATING))
				node_terminate
				    (node_res_obj->node, &status);

			node_delete(node_res_obj, ctxt);
		}
	}

	return 0;
}
/* Release all mapped and reserved DMM resources */
int drv_remove_all_dmm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	struct dmm_map_object *temp_map, *map_obj;
	struct dmm_rsv_object *temp_rsv, *rsv_obj;

	/* Free DMM mapped memory resources */
	list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
		status = proc_un_map(ctxt->processor,
				     (void *)map_obj->dsp_addr, ctxt);
		if (status)
			pr_err("%s: proc_un_map failed! status = 0x%x\n",
			       __func__, status);
	}

	/* Free DMM reserved memory resources */
	list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
		status = proc_un_reserve_memory(ctxt->processor, (void *)
						rsv_obj->dsp_reserved_addr,
						ctxt);
		if (status)
			pr_err("%s: proc_un_reserve_memory failed! status = 0x%x\n",
			       __func__, status);
	}
	return status;
}
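
/*
 * Note on the loops above: the _safe iterator variants are what make
 * this cleanup legal.  proc_un_map() and proc_un_reserve_memory() are
 * expected to unlink the element being processed from dmm_map_list /
 * dmm_rsv_list, so the plain iterators would walk freed memory;
 * list_for_each_entry_safe() caches the next pointer (temp_map /
 * temp_rsv) before the body runs.
 */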
/* Update node allocation status */
void drv_proc_node_update_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	DBC_ASSERT(node_resource != NULL);
	node_res_obj->node_allocated = status;
}
/* Update node heap status */
void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	DBC_ASSERT(node_resource != NULL);
	node_res_obj->heap_allocated = status;
}
/* Release all node resources and their context.
 * This is called from bridge_release. */
int drv_remove_all_node_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
	idr_destroy(ctxt->node_id);

	return 0;
}
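
/*
 * idr_for_each() walks every id still registered in ctxt->node_id and
 * hands the stored pointer to drv_proc_free_node_res(), which frees
 * the node itself.  idr_destroy() afterwards only releases the IDR's
 * internal bookkeeping; it does not touch the stored pointers, which
 * is why the per-entry callback has to run first.  The stream variant
 * below follows the same pattern.
 */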
/* Allocate the STRM resource element.
 * This is called after the actual resource is allocated. */
int drv_proc_insert_strm_res_element(void *stream_obj,
				     void *strm_res, void *process_ctxt)
{
	struct strm_res_object **pstrm_res =
	    (struct strm_res_object **)strm_res;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
	if (*pstrm_res == NULL) {
		status = -EFAULT;
		goto func_end;
	}

	(*pstrm_res)->stream = stream_obj;
	retval = idr_get_new(ctxt->stream_id, *pstrm_res,
			     &(*pstrm_res)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->stream_id, *pstrm_res,
				     &(*pstrm_res)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EPERM;
	}

func_end:
	return status;
}
static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;
	struct strm_res_object *strm_res = p;
	struct stream_info strm_info;
	struct dsp_streaminfo user;
	u8 **ap_buffer = NULL;
	u8 *buf_ptr;
	u32 ul_bytes;
	u32 dw_arg;
	s32 ul_buf_size;

	if (strm_res->num_bufs) {
		ap_buffer = kmalloc((strm_res->num_bufs *
				     sizeof(u8 *)), GFP_KERNEL);
		if (ap_buffer) {
			strm_free_buffer(strm_res, ap_buffer,
					 strm_res->num_bufs, ctxt);
			kfree(ap_buffer);
		}
	}
	strm_info.user_strm = &user;
	user.number_bufs_in_stream = 0;
	strm_get_info(strm_res->stream, &strm_info, sizeof(strm_info));
	while (user.number_bufs_in_stream--)
		strm_reclaim(strm_res->stream, &buf_ptr, &ul_bytes,
			     (u32 *) &ul_buf_size, &dw_arg);
	strm_close(strm_res, ctxt);
	return 0;
}
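
/*
 * Cleanup order above: buffers the process still has allocated on the
 * stream are freed first (strm_free_buffer), then every buffer still
 * queued on the stream is reclaimed one by one before strm_close() is
 * called, since strm_close() is expected to refuse a stream that
 * still has buffers outstanding.
 */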
/* Release all stream resources and their context.
 * This is called from bridge_release. */
int drv_remove_all_strm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
	idr_destroy(ctxt->stream_id);

	return 0;
}
/* Update the stream resource element */
int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
{
	int status = 0;
	struct strm_res_object **strm_res =
	    (struct strm_res_object **)strm_resources;

	(*strm_res)->num_bufs = num_bufs;
	return status;
}

/* GPP PROCESS CLEANUP CODE END */
/*
 *  ======== drv_create ========
 *  Purpose:
 *      DRV object gets created only once during driver loading.
 */
int drv_create(struct drv_object **drv_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	DBC_REQUIRE(drv_obj != NULL);
	DBC_REQUIRE(refs > 0);

	pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
	if (pdrv_object) {
		/* Create and initialize the lists of device objects */
		INIT_LIST_HEAD(&pdrv_object->dev_list);
		INIT_LIST_HEAD(&pdrv_object->dev_node_string);
	} else {
		status = -ENOMEM;
	}
	/* Store the DRV object in the driver data */
	if (!status) {
		if (drv_datap) {
			drv_datap->drv_object = (void *)pdrv_object;
		} else {
			status = -EPERM;
			pr_err("%s: Failed to store DRV object\n", __func__);
		}
	}

	if (!status) {
		*drv_obj = pdrv_object;
	} else {
		/* Free the DRV object */
		kfree(pdrv_object);
	}

	DBC_ENSURE(status || pdrv_object);
	return status;
}
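
/*
 * dev_get_drvdata(bridge), used here and throughout this file,
 * fetches the struct drv_data that the probe code attached to the
 * bridge device; "bridge" itself is a device pointer declared in the
 * driver's host-OS support code, not in this file.
 */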
/*
 *  ======== drv_exit ========
 *  Purpose:
 *      Discontinue usage of the DRV module.
 */
void drv_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;

	DBC_ENSURE(refs >= 0);
}
/*
 *  ======== drv_destroy ========
 *  Purpose:
 *      Invoked during bridge de-initialization.
 */
int drv_destroy(struct drv_object *driver_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pdrv_object);

	kfree(pdrv_object);
	/* Clear the DRV object in the driver data */
	if (drv_datap) {
		drv_datap->drv_object = NULL;
	} else {
		status = -EPERM;
		pr_err("%s: Failed to clear DRV object\n", __func__);
	}

	return status;
}
/*
 *  ======== drv_get_dev_object ========
 *  Purpose:
 *      Given an index, returns a handle to the DevObject from the list.
 */
int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
		       struct dev_object **device_obj)
{
	int status = 0;
#ifdef CONFIG_TIDSPBRIDGE_DEBUG
	/* used only for assertions and debug messages */
	struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
#endif
	struct dev_object *dev_obj;
	u32 i;

	DBC_REQUIRE(pdrv_obj);
	DBC_REQUIRE(device_obj != NULL);
	DBC_REQUIRE(index >= 0);
	DBC_REQUIRE(refs > 0);
	DBC_ASSERT(!(list_empty(&pdrv_obj->dev_list)));

	dev_obj = (struct dev_object *)drv_get_first_dev_object();
	for (i = 0; i < index; i++) {
		dev_obj = (struct dev_object *)
		    drv_get_next_dev_object((u32) dev_obj);
	}
	if (dev_obj) {
		*device_obj = (struct dev_object *)dev_obj;
	} else {
		*device_obj = NULL;
		status = -EPERM;
	}

	return status;
}
/*
 *  ======== drv_get_first_dev_object ========
 *  Purpose:
 *      Retrieve the first Device Object handle from an internal linked
 *      list of DEV_OBJECTs maintained by DRV.
 */
u32 drv_get_first_dev_object(void)
{
	u32 dw_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_list))
			dw_dev_object = (u32) pdrv_obj->dev_list.next;
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_object;
}
/*
 *  ======== drv_get_first_dev_extension ========
 *  Purpose:
 *      Retrieve the first Device Extension from an internal linked
 *      list of pointers to dev_node strings maintained by DRV.
 */
u32 drv_get_first_dev_extension(void)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_node_string)) {
			dw_dev_extension =
			    (u32) pdrv_obj->dev_node_string.next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}
/*
 *  ======== drv_get_next_dev_object ========
 *  Purpose:
 *      Retrieve the next Device Object handle from an internal linked
 *      list of DEV_OBJECTs maintained by DRV, after having previously
 *      called drv_get_first_dev_object() and zero or more
 *      drv_get_next_dev_object().
 */
u32 drv_get_next_dev_object(u32 hdev_obj)
{
	u32 dw_next_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	struct list_head *curr;

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_list)) {
			curr = (struct list_head *)hdev_obj;
			if (list_is_last(curr, &pdrv_obj->dev_list))
				return 0;
			dw_next_dev_object = (u32) curr->next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_next_dev_object;
}
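
/*
 * The casts above rely on a layout convention rather than the type
 * system: device-object handles double as list_head pointers (see
 * drv_insert_dev_object(), which links the handle itself into
 * dev_list), and struct drv_ext keeps its link as the first member,
 * so ->next can be handed back to callers as the next handle.  That
 * in turn assumes pointers fit in a u32, which held for the 32-bit
 * OMAP targets this driver supports.
 */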
/*
 *  ======== drv_get_next_dev_extension ========
 *  Purpose:
 *      Retrieve the next Device Extension from an internal linked list
 *      of pointers to dev_node strings maintained by DRV, after having
 *      previously called drv_get_first_dev_extension() and zero or
 *      more drv_get_next_dev_extension().
 */
u32 drv_get_next_dev_extension(u32 dev_extension)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	struct list_head *curr;

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_node_string)) {
			curr = (struct list_head *)dev_extension;
			if (list_is_last(curr, &pdrv_obj->dev_node_string))
				return 0;
			dw_dev_extension = (u32) curr->next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}
/*
 *  ======== drv_init ========
 *  Purpose:
 *      Initialize DRV module private state.
 */
int drv_init(void)
{
	s32 ret = 1;		/* function return value */

	DBC_REQUIRE(refs >= 0);

	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	return ret;
}
/*
 *  ======== drv_insert_dev_object ========
 *  Purpose:
 *      Insert a DevObject into the DRV object's list of device objects.
 */
int drv_insert_dev_object(struct drv_object *driver_obj,
			  struct dev_object *hdev_obj)
{
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hdev_obj != NULL);
	DBC_REQUIRE(pdrv_object);

	list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list);

	return 0;
}
/*
 *  ======== drv_remove_dev_object ========
 *  Purpose:
 *      Search for and remove a DeviceObject from the given list of DRV
 *      objects.
 */
int drv_remove_dev_object(struct drv_object *driver_obj,
			  struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct list_head *cur_elem;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pdrv_object);
	DBC_REQUIRE(hdev_obj != NULL);

	DBC_REQUIRE(!list_empty(&pdrv_object->dev_list));

	/* Search the list for hdev_obj: */
	list_for_each(cur_elem, &pdrv_object->dev_list) {
		/* If found, remove it. */
		if ((struct dev_object *)cur_elem == hdev_obj) {
			list_del(cur_elem);
			status = 0;
			break;
		}
	}

	return status;
}
/*
 *  ======== drv_request_resources ========
 *  Purpose:
 *      Requests resources from the OS.
 */
int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
{
	int status = 0;
	struct drv_object *pdrv_object;
	struct drv_ext *pszdev_node;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	DBC_REQUIRE(dw_context != 0);
	DBC_REQUIRE(dev_node_strg != NULL);

	/*
	 *  Allocate memory to hold the string. It lives until it is
	 *  freed in the release-resources path. Update the driver
	 *  object list.
	 */

	if (!drv_datap || !drv_datap->drv_object)
		status = -ENODATA;
	else
		pdrv_object = drv_datap->drv_object;

	if (!status) {
		pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
		if (pszdev_node) {
			strncpy(pszdev_node->sz_string,
				(char *)dw_context, MAXREGPATHLENGTH - 1);
			pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
			/* Update the driver object list */
			*dev_node_strg = (u32) pszdev_node->sz_string;
			list_add_tail(&pszdev_node->link,
				      &pdrv_object->dev_node_string);
		} else {
			status = -ENOMEM;
			*dev_node_strg = 0;
		}
	} else {
		dev_dbg(bridge, "%s: Failed to get Driver Object from Registry",
			__func__);
		*dev_node_strg = 0;
	}

	DBC_ENSURE((!status && dev_node_strg != NULL &&
		    !list_empty(&pdrv_object->dev_node_string)) ||
		   (status && *dev_node_strg == 0));

	return status;
}
/*
 *  ======== drv_release_resources ========
 *  Purpose:
 *      Releases resources from the OS.
 */
int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
{
	int status = 0;
	struct drv_ext *pszdev_node;

	/*
	 *  Irrespective of the status, go ahead and clean it up.
	 *  The following will overwrite the status.
	 */
	for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
	     pszdev_node != NULL; pszdev_node = (struct drv_ext *)
	     drv_get_next_dev_extension((u32) pszdev_node)) {
		if ((u32) pszdev_node == dw_context) {
			/* Delete it from the driver object list */
			list_del(&pszdev_node->link);
			kfree(pszdev_node);
			break;
		}
	}
	return status;
}
/*
 *  ======== request_bridge_resources ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
static int request_bridge_resources(struct cfg_hostres *res)
{
	struct cfg_hostres *host_res = res;

	/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
	host_res->num_mem_windows = 2;

	/* First window is for DSP internal memory */
	dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
	dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
	dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

	/* For 24xx, the base port does not map the DSP internal memory.
	 * TODO: do an ioremap here. */
	/* Second window is for DSP external memory shared with MPU */

	/* These are hard-coded values */
	host_res->birq_registers = 0;
	host_res->birq_attrib = 0;
	host_res->offset_for_monitor = 0;
	host_res->chnl_offset = 0;
	/* CHNL_MAXCHANNELS */
	host_res->num_chnls = CHNL_MAXCHANNELS;
	host_res->chnl_buf_size = 0x400;

	return 0;
}
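
/*
 * Note: the two-window setup above is only a baseline;
 * drv_request_bridge_res_dsp() below calls this helper first and then
 * overrides num_mem_windows (to 4) and fills in the per-window
 * mappings, so the values here mostly matter for the debug output and
 * the hard-coded channel parameters.
 */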
/*
 *  ======== drv_request_bridge_res_dsp ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
int drv_request_bridge_res_dsp(void **phost_resources)
{
	int status = 0;
	struct cfg_hostres *host_res;
	u32 dw_buff_size;
	u32 dma_addr;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	dw_buff_size = sizeof(struct cfg_hostres);

	host_res = kzalloc(dw_buff_size, GFP_KERNEL);

	if (host_res != NULL) {
		request_bridge_resources(host_res);
		/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
		host_res->num_mem_windows = 4;

		host_res->mem_base[0] = 0;
		host_res->mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
						      OMAP_DSP_MEM1_SIZE);
		host_res->mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
						      OMAP_DSP_MEM2_SIZE);
		host_res->mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
						      OMAP_DSP_MEM3_SIZE);
		host_res->per_base = ioremap(OMAP_PER_CM_BASE,
					     OMAP_PER_CM_SIZE);
		host_res->per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
						      OMAP_PER_PRM_SIZE);
		host_res->core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
						       OMAP_CORE_PRM_SIZE);
		host_res->dmmu_base = ioremap(OMAP_DMMU_BASE,
					      OMAP_DMMU_SIZE);

		dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
		dev_dbg(bridge, "mem_base[1] 0x%x\n", host_res->mem_base[1]);
		dev_dbg(bridge, "mem_base[2] 0x%x\n", host_res->mem_base[2]);
		dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
		dev_dbg(bridge, "mem_base[4] 0x%x\n", host_res->mem_base[4]);
		dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			/* Allocate physically contiguous,
			 * non-cacheable memory */
			host_res->mem_base[1] =
			    (u32) mem_alloc_phys_mem(shm_size, 0x100000,
						     &dma_addr);
			if (host_res->mem_base[1] == 0) {
				status = -ENOMEM;
				pr_err("shm reservation Failed\n");
			} else {
				host_res->mem_length[1] = shm_size;
				host_res->mem_phys[1] = dma_addr;

				dev_dbg(bridge, "%s: Bridge shm address 0x%x "
					"dma_addr %x size %x\n", __func__,
					host_res->mem_base[1],
					dma_addr, shm_size);
			}
		}
		if (!status) {
			/* These are hard-coded values */
			host_res->birq_registers = 0;
			host_res->birq_attrib = 0;
			host_res->offset_for_monitor = 0;
			host_res->chnl_offset = 0;
			/* CHNL_MAXCHANNELS */
			host_res->num_chnls = CHNL_MAXCHANNELS;
			host_res->chnl_buf_size = 0x400;
			dw_buff_size = sizeof(struct cfg_hostres);
		}
		*phost_resources = host_res;
	}
	/* End mem alloc */
	return status;
}
void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
{
	u32 pool_virt_base;

	/* Get the virtual address for the physical memory pool passed in */
	pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);

	if ((void *)pool_virt_base == NULL) {
		pr_err("%s: external physical memory map failed\n", __func__);
		ext_phys_mem_pool_enabled = false;
	} else {
		ext_mem_pool.phys_mem_base = pool_phys_base;
		ext_mem_pool.phys_mem_size = pool_size;
		ext_mem_pool.virt_mem_base = pool_virt_base;
		ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
		ext_phys_mem_pool_enabled = true;
	}
}
void mem_ext_phys_pool_release(void)
{
	if (ext_phys_mem_pool_enabled) {
		iounmap((void *)(ext_mem_pool.virt_mem_base));
		ext_phys_mem_pool_enabled = false;
	}
}
/*
 *  ======== mem_ext_phys_mem_alloc ========
 *  Purpose:
 *      Allocate physically contiguous, uncached memory from the
 *      external memory pool.
 */
static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
{
	u32 new_alloc_ptr;
	u32 offset;
	u32 virt_addr;

	if (align == 0)
		align = 1;

	if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
		     - ext_mem_pool.next_phys_alloc_ptr)) {
		phys_addr = NULL;
		return NULL;
	} else {
		offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
		if (offset == 0)
			new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
		else
			new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
			    (align - offset);
		if ((new_alloc_ptr + bytes) <=
		    (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
			/* We can allocate */
			*phys_addr = new_alloc_ptr;
			ext_mem_pool.next_phys_alloc_ptr =
			    new_alloc_ptr + bytes;
			virt_addr =
			    ext_mem_pool.virt_mem_base + (new_alloc_ptr -
							  ext_mem_pool.
							  phys_mem_base);
			return (void *)virt_addr;
		} else {
			*phys_addr = 0;
			return NULL;
		}
	}
}
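
/*
 * The helper above is a simple bump allocator: next_phys_alloc_ptr
 * only ever moves forward, first rounded up to the requested
 * alignment and then advanced past the new block.  The rounding uses
 * "ptr & (align - 1)", which is only meaningful when align is a power
 * of two -- callers are trusted on that point (the one caller in this
 * file passes 0x100000).  Individual blocks are never handed back;
 * the whole pool is unmapped at once in mem_ext_phys_pool_release().
 */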
/*
 *  ======== mem_alloc_phys_mem ========
 *  Purpose:
 *      Allocate physically contiguous, uncached memory.
 */
void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
			 u32 *physical_address)
{
	void *va_mem = NULL;
	dma_addr_t pa_mem;

	if (byte_size > 0) {
		if (ext_phys_mem_pool_enabled) {
			va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
							(u32 *) &pa_mem);
		} else
			va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
						    GFP_KERNEL);
		if (va_mem == NULL)
			*physical_address = 0;
		else
			*physical_address = pa_mem;
	}
	return va_mem;
}
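
/*
 * Two allocation paths: when a dedicated external pool was mapped at
 * boot, memory is carved out of it with the bump allocator above;
 * otherwise the allocation falls through to dma_alloc_coherent(),
 * which on the 32-bit ARM kernels this driver targeted still accepted
 * a NULL device argument (newer kernels require a real struct device
 * here).  Either way the caller gets an uncached virtual address plus
 * the matching physical/DMA address.
 */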
/*
 *  ======== mem_free_phys_mem ========
 *  Purpose:
 *      Free the given block of physically contiguous memory.
 */
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
		       u32 byte_size)
{
	DBC_REQUIRE(virtual_address != NULL);

	if (!ext_phys_mem_pool_enabled)
		dma_free_coherent(NULL, byte_size, virtual_address,
				  physical_address);
}
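
/*
 * Pool-backed blocks are deliberately not freed here: the bump
 * allocator keeps no per-block bookkeeping, so there is nothing to
 * return them to.  Only dma_alloc_coherent() allocations are
 * released, with the pool as a whole going away in
 * mem_ext_phys_pool_release().
 */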