/*
 * proc.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Processor interface at the driver level.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
/* ------------------------------------ Host OS */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/ntfy.h>
#include <dspbridge/sync.h>
/* ----------------------------------- Bridge Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspdeh.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/cod.h>
#include <dspbridge/dev.h>
#include <dspbridge/procpriv.h>
#include <dspbridge/dmm.h>

/* ----------------------------------- Resource Manager */
#include <dspbridge/mgr.h>
#include <dspbridge/node.h>
#include <dspbridge/nldr.h>
#include <dspbridge/rmm.h>

/* ----------------------------------- Others */
#include <dspbridge/dbdcd.h>
#include <dspbridge/msg.h>
#include <dspbridge/dspioctl.h>
#include <dspbridge/drv.h>

/* ----------------------------------- This */
#include <dspbridge/proc.h>
#include <dspbridge/pwr.h>

#include <dspbridge/resourcecleanup.h>
/* ----------------------------------- Defines, Data Structures, Typedefs */
#define MAXCMDLINELEN       255
#define PROC_ENVPROCID      "PROC_ID=%d"
#define MAXPROCIDLEN        (8 + 5)
#define PROC_DFLT_TIMEOUT   10000	/* Time out in milliseconds */
#define PWR_TIMEOUT         500		/* Sleep/wake timeout in msec */
#define EXTEND              "_EXT_END"	/* Extmem end addr in DSP binary */

#define DSP_CACHE_LINE      128

#define BUFMODE_MASK        (3 << 14)

/* Buffer modes from DSP perspective */
#define RBUF                0x4000	/* Input buffer */
#define WBUF                0x8000	/* Output Buffer */
extern struct device *bridge;
/* ----------------------------------- Globals */

/* The proc_object structure. */
struct proc_object {
	struct list_head link;		/* Link to next proc_object */
	struct dev_object *dev_obj;	/* Device this PROC represents */
	u32 process;			/* Process owning this Processor */
	struct mgr_object *mgr_obj;	/* Manager Object Handle */
	u32 attach_count;		/* Processor attach count */
	u32 processor_id;		/* Processor number */
	u32 timeout;			/* Time out count */
	enum dsp_procstate proc_state;	/* Processor state */
	u32 unit;			/* DDSP unit number */
	bool is_already_attached;	/*
					 * True if the Device below has
					 * GPP Client attached
					 */
	struct ntfy_object *ntfy_obj;	/* Manages notifications */
	/* Bridge Context Handle */
	struct bridge_dev_context *bridge_context;
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;
	char *last_coff;		/* Last loaded COFF file name */
	struct list_head proc_list;
};

static u32 refs;

DEFINE_MUTEX(proc_lock);	/* For critical sections */
/* ----------------------------------- Function Prototypes */
static int proc_monitor(struct proc_object *proc_obj);
static s32 get_envp_count(char **envp);
static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
			   s32 cnew_envp, char *sz_var);
/* remember mapping information */
static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
					       u32 mpu_addr, u32 dsp_addr,
					       u32 size)
{
	struct dmm_map_object *map_obj;

	u32 num_usr_pgs = size / PG_SIZE4K;

	pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n",
		 __func__, mpu_addr, dsp_addr, size);

	map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
	if (!map_obj) {
		pr_err("%s: kzalloc failed\n", __func__);
		return NULL;
	}
	INIT_LIST_HEAD(&map_obj->link);

	map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *),
				 GFP_KERNEL);
	if (!map_obj->pages) {
		pr_err("%s: kcalloc failed\n", __func__);
		kfree(map_obj);
		return NULL;
	}

	map_obj->mpu_addr = mpu_addr;
	map_obj->dsp_addr = dsp_addr;
	map_obj->size = size;
	map_obj->num_usr_pgs = num_usr_pgs;

	spin_lock(&pr_ctxt->dmm_map_lock);
	list_add(&map_obj->link, &pr_ctxt->dmm_map_list);
	spin_unlock(&pr_ctxt->dmm_map_lock);

	return map_obj;
}
static int match_exact_map_obj(struct dmm_map_object *map_obj,
			       u32 dsp_addr, u32 size)
{
	if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
		pr_err("%s: addr match (0x%x), size mismatch (0x%x != 0x%x)\n",
		       __func__, dsp_addr, map_obj->size, size);

	return map_obj->dsp_addr == dsp_addr &&
		map_obj->size == size;
}
static void remove_mapping_information(struct process_context *pr_ctxt,
				       u32 dsp_addr, u32 size)
{
	struct dmm_map_object *map_obj;

	pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
		 dsp_addr, size);

	spin_lock(&pr_ctxt->dmm_map_lock);
	list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
			 __func__, map_obj->mpu_addr,
			 map_obj->dsp_addr, map_obj->size);

		if (match_exact_map_obj(map_obj, dsp_addr, size)) {
			pr_debug("%s: match, deleting map info\n", __func__);
			list_del(&map_obj->link);
			kfree(map_obj->dma_info.sg);
			kfree(map_obj->pages);
			kfree(map_obj);
			goto out;
		}
		pr_debug("%s: candidate didn't match\n", __func__);
	}

	pr_err("%s: failed to find given map info\n", __func__);
out:
	spin_unlock(&pr_ctxt->dmm_map_lock);
}
static int match_containing_map_obj(struct dmm_map_object *map_obj,
				    u32 mpu_addr, u32 size)
{
	u32 map_obj_end = map_obj->mpu_addr + map_obj->size;

	return mpu_addr >= map_obj->mpu_addr &&
		mpu_addr + size <= map_obj_end;
}
static struct dmm_map_object *find_containing_mapping(
				struct process_context *pr_ctxt,
				u32 mpu_addr, u32 size)
{
	struct dmm_map_object *map_obj;

	pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__,
		 mpu_addr, size);

	spin_lock(&pr_ctxt->dmm_map_lock);
	list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
			 __func__, map_obj->mpu_addr,
			 map_obj->dsp_addr, map_obj->size);
		if (match_containing_map_obj(map_obj, mpu_addr, size)) {
			pr_debug("%s: match!\n", __func__);
			goto out;
		}

		pr_debug("%s: no match!\n", __func__);
	}

	map_obj = NULL;
out:
	spin_unlock(&pr_ctxt->dmm_map_lock);
	return map_obj;
}
static int find_first_page_in_cache(struct dmm_map_object *map_obj,
				    unsigned long mpu_addr)
{
	u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT;
	u32 requested_base_page = mpu_addr >> PAGE_SHIFT;
	int pg_index = requested_base_page - mapped_base_page;

	if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
		pr_err("%s: failed (got %d)\n", __func__, pg_index);
		return -1;
	}

	pr_debug("%s: first page is %d\n", __func__, pg_index);
	return pg_index;
}
static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
					    int pg_i)
{
	pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
		 pg_i, map_obj->num_usr_pgs);

	if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
		pr_err("%s: requested pg_i %d is out of mapped range\n",
		       __func__, pg_i);
		return NULL;
	}

	return map_obj->pages[pg_i];
}
/*
 *  ======== proc_attach ========
 *  Purpose:
 *      Prepare for communication with a particular DSP processor, and return
 *      a handle to the processor object.
 */
int
proc_attach(u32 processor_id,
	    const struct dsp_processorattrin *attr_in,
	    void **ph_processor, struct process_context *pr_ctxt)
{
	int status = 0;
	struct dev_object *hdev_obj;
	struct proc_object *p_proc_object = NULL;
	struct mgr_object *hmgr_obj = NULL;
	struct drv_object *hdrv_obj = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	u8 dev_type;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_processor != NULL);

	if (pr_ctxt->processor) {
		*ph_processor = pr_ctxt->processor;
		return status;
	}

	/* Get the Driver and Manager Object Handles */
	if (!drv_datap || !drv_datap->drv_object || !drv_datap->mgr_object) {
		status = -ENODATA;
		pr_err("%s: Failed to get object handles\n", __func__);
	} else {
		hdrv_obj = drv_datap->drv_object;
		hmgr_obj = drv_datap->mgr_object;
	}

	if (!status) {
		/* Get the Device Object */
		status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
	}
	if (!status)
		status = dev_get_dev_type(hdev_obj, &dev_type);

	if (status)
		goto func_end;

	/* If we made it this far, create the Processor object: */
	p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
	/* Fill out the Processor Object: */
	if (p_proc_object == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	p_proc_object->dev_obj = hdev_obj;
	p_proc_object->mgr_obj = hmgr_obj;
	p_proc_object->processor_id = dev_type;
	/* Store TGID instead of process handle */
	p_proc_object->process = current->tgid;

	INIT_LIST_HEAD(&p_proc_object->proc_list);

	if (attr_in)
		p_proc_object->timeout = attr_in->timeout;
	else
		p_proc_object->timeout = PROC_DFLT_TIMEOUT;

	status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
	if (!status) {
		status = dev_get_bridge_context(hdev_obj,
					&p_proc_object->bridge_context);
		if (status)
			kfree(p_proc_object);
	} else {
		kfree(p_proc_object);
	}

	if (status)
		goto func_end;

	/* Create the Notification Object */
	/* This is created with no event mask, no notify mask
	 * and no valid handle to the notification. They all get
	 * filled up when proc_register_notify is called */
	p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
					  GFP_KERNEL);
	if (p_proc_object->ntfy_obj)
		ntfy_init(p_proc_object->ntfy_obj);
	else
		status = -ENOMEM;

	if (!status) {
		/* Insert the Processor Object into the DEV List.
		 * Return handle to this Processor Object:
		 * Find out if the Device is already attached to a
		 * Processor. If so, return AlreadyAttached status */
		status = dev_insert_proc_object(p_proc_object->dev_obj,
						(u32) p_proc_object,
						&p_proc_object->
						is_already_attached);
		if (!status) {
			if (p_proc_object->is_already_attached)
				status = 0;
		} else {
			if (p_proc_object->ntfy_obj) {
				ntfy_delete(p_proc_object->ntfy_obj);
				kfree(p_proc_object->ntfy_obj);
			}

			kfree(p_proc_object);
		}
		if (!status) {
			*ph_processor = (void *)p_proc_object;
			pr_ctxt->processor = *ph_processor;
			(void)proc_notify_clients(p_proc_object,
						  DSP_PROCESSORATTACH);
		}
	} else {
		/* Don't leak memory if status is failed */
		kfree(p_proc_object);
	}
func_end:
	DBC_ENSURE((status == -EPERM && *ph_processor == NULL) ||
		   (!status && p_proc_object) ||
		   (status == 0 && p_proc_object));

	return status;
}
static int get_exec_file(struct cfg_devnode *dev_node_obj,
			 struct dev_object *hdev_obj,
			 u32 size, char *exec_file)
{
	u8 dev_type;
	s32 len;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	dev_get_dev_type(hdev_obj, (u8 *) &dev_type);

	if (!exec_file)
		return -EFAULT;

	if (dev_type == DSP_UNIT) {
		if (!drv_datap || !drv_datap->base_img)
			return -EFAULT;

		if (strlen(drv_datap->base_img) > size)
			return -EINVAL;

		strcpy(exec_file, drv_datap->base_img);
	} else if (dev_type == IVA_UNIT && iva_img) {
		len = strlen(iva_img);
		strncpy(exec_file, iva_img, len + 1);
	} else {
		return -ENOENT;
	}

	return 0;
}
/*
 *  ======== proc_auto_start ========
 *  Purpose:
 *      A Particular device gets loaded with the default image
 *      if the AutoStart flag is set.
 *  Parameters:
 *      hdev_obj:   Handle to the Device
 *  Returns:
 *      0:          On Successful Loading
 *      -EPERM:     General Failure
 */
int proc_auto_start(struct cfg_devnode *dev_node_obj,
		    struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct proc_object *p_proc_object;
	char sz_exec_file[MAXCMDLINELEN];
	char *argv[2];
	struct mgr_object *hmgr_obj = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	u8 dev_type;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dev_node_obj != NULL);
	DBC_REQUIRE(hdev_obj != NULL);

	/* Create a Dummy PROC Object */
	if (!drv_datap || !drv_datap->mgr_object) {
		status = -ENODATA;
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
		goto func_end;
	}
	hmgr_obj = drv_datap->mgr_object;

	p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
	if (p_proc_object == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	p_proc_object->dev_obj = hdev_obj;
	p_proc_object->mgr_obj = hmgr_obj;
	status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
	if (!status)
		status = dev_get_bridge_context(hdev_obj,
						&p_proc_object->bridge_context);
	if (status)
		goto func_cont;

	/* Stop the Device, put it into standby mode */
	status = proc_stop(p_proc_object);

	if (status)
		goto func_cont;

	/* Get the default executable for this board... */
	dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
	p_proc_object->processor_id = dev_type;
	status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file),
			       sz_exec_file);
	if (!status) {
		argv[0] = sz_exec_file;
		argv[1] = NULL;
		/* ...and try to load it: */
		status = proc_load(p_proc_object, 1, (const char **)argv,
				   NULL);
		if (!status)
			status = proc_start(p_proc_object);
	}
	kfree(p_proc_object->last_coff);
	p_proc_object->last_coff = NULL;
func_cont:
	kfree(p_proc_object);
func_end:
	return status;
}
/*
 *  ======== proc_ctrl ========
 *  Purpose:
 *      Pass control information to the GPP device driver managing the
 *      DSP processor.
 *
 *      This will be an OEM-only function, and not part of the DSP/BIOS Bridge
 *      application developer's API.
 *      Call the bridge_dev_ctrl fxn with the Argument. This is a Synchronous
 *      Operation. arg can be null.
 */
int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata *arg)
{
	int status = 0;
	struct proc_object *p_proc_object = hprocessor;
	u32 timeout = 0;

	DBC_REQUIRE(refs > 0);

	if (p_proc_object) {
		/* intercept PWR deep sleep command */
		if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
			timeout = arg->cb_data;
			status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
		}
		/* intercept PWR emergency sleep command */
		else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) {
			timeout = arg->cb_data;
			status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP,
					       timeout);
		} else if (dw_cmd == PWR_DEEPSLEEP) {
			/* timeout = arg->cb_data; */
			status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
		}
		/* intercept PWR wake commands */
		else if (dw_cmd == BRDIOCTL_WAKEUP) {
			timeout = arg->cb_data;
			status = pwr_wake_dsp(timeout);
		} else if (dw_cmd == PWR_WAKEUP) {
			/* timeout = arg->cb_data; */
			status = pwr_wake_dsp(timeout);
		} else
		    if (!((*p_proc_object->intf_fxns->dev_cntrl)
			(p_proc_object->bridge_context, dw_cmd, arg))) {
			status = 0;
		} else {
			status = -EPERM;
		}
	} else {
		status = -EFAULT;
	}

	return status;
}
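
/*
 * Example (illustrative only): putting the DSP into deep sleep through
 * the control path. The cb_data member carries the timeout, matching
 * the interception logic above; no layout of dsp_cbdata beyond cb_data
 * is assumed here.
 *
 *	struct dsp_cbdata arg;
 *
 *	arg.cb_data = PWR_TIMEOUT;
 *	if (proc_ctrl(proc, BRDIOCTL_DEEPSLEEP, &arg))
 *		pr_warn("deep sleep request failed\n");
 *
 * Any dw_cmd not intercepted above falls through to the bridge's
 * dev_cntrl hook.
 */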
/*
 *  ======== proc_detach ========
 *  Purpose:
 *      Destroys the Processor Object. Removes the notification from the Dev
 *      List.
 */
int proc_detach(struct process_context *pr_ctxt)
{
	int status = 0;
	struct proc_object *p_proc_object = NULL;

	DBC_REQUIRE(refs > 0);

	p_proc_object = (struct proc_object *)pr_ctxt->processor;

	if (p_proc_object) {
		/* Notify the Client */
		ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH);
		/* Remove the notification memory */
		if (p_proc_object->ntfy_obj) {
			ntfy_delete(p_proc_object->ntfy_obj);
			kfree(p_proc_object->ntfy_obj);
		}

		kfree(p_proc_object->last_coff);
		p_proc_object->last_coff = NULL;
		/* Remove the Proc from the DEV List */
		(void)dev_remove_proc_object(p_proc_object->dev_obj,
					     (u32) p_proc_object);
		/* Free the Processor Object */
		kfree(p_proc_object);
		pr_ctxt->processor = NULL;
	} else {
		status = -EFAULT;
	}

	return status;
}
/*
 *  ======== proc_enum_nodes ========
 *  Purpose:
 *      Enumerate and get configuration information about nodes allocated
 *      on a DSP processor.
 */
int proc_enum_nodes(void *hprocessor, void **node_tab,
		    u32 node_tab_size, u32 *pu_num_nodes,
		    u32 *pu_allocated)
{
	int status = -EPERM;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct node_mgr *hnode_mgr = NULL;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
	DBC_REQUIRE(pu_num_nodes != NULL);
	DBC_REQUIRE(pu_allocated != NULL);

	if (p_proc_object) {
		if (!(dev_get_node_manager(p_proc_object->dev_obj,
					   &hnode_mgr))) {
			if (hnode_mgr) {
				status = node_enum_nodes(hnode_mgr, node_tab,
							 node_tab_size,
							 pu_num_nodes,
							 pu_allocated);
			}
		}
	} else {
		status = -EFAULT;
	}

	return status;
}
/* Cache operation against kernel address instead of users */
static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
			ssize_t len, int pg_i)
{
	struct page *page;
	unsigned long offset;
	ssize_t rest;
	int ret = 0, i = 0;
	struct scatterlist *sg = map_obj->dma_info.sg;

	while (len) {
		page = get_mapping_page(map_obj, pg_i);
		if (!page) {
			pr_err("%s: no page for %08lx\n", __func__, start);
			ret = -EINVAL;
			goto out;
		} else if (IS_ERR(page)) {
			pr_err("%s: err page for %08lx(%lu)\n", __func__,
			       start, PTR_ERR(page));
			ret = PTR_ERR(page);
			goto out;
		}

		offset = start & ~PAGE_MASK;
		rest = min_t(ssize_t, PAGE_SIZE - offset, len);

		sg_set_page(&sg[i], page, rest, offset);

		len -= rest;
		start += rest;
		pg_i++, i++;
	}

	if (i != map_obj->dma_info.num_pages) {
		pr_err("%s: bad number of sg iterations\n", __func__);
		ret = -EFAULT;
		goto out;
	}

out:
	return ret;
}
static int memory_regain_ownership(struct dmm_map_object *map_obj,
		unsigned long start, ssize_t len, enum dma_data_direction dir)
{
	int ret = 0;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;
	struct bridge_dma_map_info *dma_info = &map_obj->dma_info;

	if (!dma_info->sg)
		goto out;

	if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
		pr_err("%s: dma info doesn't match given params\n", __func__);
		return -EINVAL;
	}

	dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir);

	pr_debug("%s: dma_map_sg unmapped\n", __func__);

	kfree(dma_info->sg);

	map_obj->dma_info.sg = NULL;

out:
	return ret;
}
/* Cache operation against kernel address instead of users */
static int memory_give_ownership(struct dmm_map_object *map_obj,
		unsigned long start, ssize_t len, enum dma_data_direction dir)
{
	int pg_i, ret, sg_num;
	struct scatterlist *sg;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;

	pg_i = find_first_page_in_cache(map_obj, start);
	if (pg_i < 0) {
		pr_err("%s: failed to find first page in cache\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		pr_err("%s: kcalloc failed\n", __func__);
		ret = -ENOMEM;
		goto out;
	}

	sg_init_table(sg, num_pages);

	/* cleanup a previous sg allocation */
	/* this may happen if application doesn't signal for e/o DMA */
	kfree(map_obj->dma_info.sg);

	map_obj->dma_info.sg = sg;
	map_obj->dma_info.dir = dir;
	map_obj->dma_info.num_pages = num_pages;

	ret = build_dma_sg(map_obj, start, len, pg_i);
	if (ret)
		goto kfree_sg;

	sg_num = dma_map_sg(bridge, sg, num_pages, dir);
	if (sg_num < 1) {
		pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
		ret = -EFAULT;
		goto kfree_sg;
	}

	pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
	map_obj->dma_info.sg_num = sg_num;

	return 0;

kfree_sg:
	kfree(sg);
	map_obj->dma_info.sg = NULL;
out:
	return ret;
}
int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
		   enum dma_data_direction dir)
{
	/* Keep STATUS here for future additions to this function */
	int status = 0;
	struct process_context *pr_ctxt = (struct process_context *)hprocessor;
	struct dmm_map_object *map_obj;

	DBC_REQUIRE(refs > 0);

	if (!pr_ctxt) {
		status = -EFAULT;
		goto err_out;
	}

	pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
		 (u32)pmpu_addr, ul_size, dir);

	mutex_lock(&proc_lock);

	/* find requested memory area in cached mapping information */
	map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
	if (!map_obj) {
		pr_err("%s: find_containing_mapping failed\n", __func__);
		status = -EFAULT;
		goto no_map;
	}

	if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
		pr_err("%s: Invalid address parameters %p %x\n",
		       __func__, pmpu_addr, ul_size);
		status = -EFAULT;
	}

no_map:
	mutex_unlock(&proc_lock);
err_out:
	return status;
}
int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
		 enum dma_data_direction dir)
{
	/* Keep STATUS here for future additions to this function */
	int status = 0;
	struct process_context *pr_ctxt = (struct process_context *)hprocessor;
	struct dmm_map_object *map_obj;

	DBC_REQUIRE(refs > 0);

	if (!pr_ctxt) {
		status = -EFAULT;
		goto err_out;
	}

	pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
		 (u32)pmpu_addr, ul_size, dir);

	mutex_lock(&proc_lock);

	/* find requested memory area in cached mapping information */
	map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
	if (!map_obj) {
		pr_err("%s: find_containing_mapping failed\n", __func__);
		status = -EFAULT;
		goto no_map;
	}

	if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
		pr_err("%s: Invalid address parameters %p %x\n",
		       __func__, pmpu_addr, ul_size);
		status = -EFAULT;
	}

no_map:
	mutex_unlock(&proc_lock);
err_out:
	return status;
}
/*
 *  ======== proc_flush_memory ========
 *  Purpose:
 *      Flush cache
 */
int proc_flush_memory(void *hprocessor, void *pmpu_addr,
		      u32 ul_size, u32 ul_flags)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;

	return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir);
}
/*
 *  ======== proc_invalidate_memory ========
 *  Purpose:
 *      Invalidates the memory specified
 */
int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size)
{
	enum dma_data_direction dir = DMA_FROM_DEVICE;

	return proc_begin_dma(hprocessor, pmpu_addr, size, dir);
}
/*
 *  ======== proc_get_resource_info ========
 *  Purpose:
 *      Enumerate the resources currently available on a processor.
 */
int proc_get_resource_info(void *hprocessor, u32 resource_type,
			   struct dsp_resourceinfo *resource_info,
			   u32 resource_info_size)
{
	int status = -EPERM;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct node_mgr *hnode_mgr = NULL;
	struct nldr_object *nldr_obj = NULL;
	struct rmm_target_obj *rmm = NULL;
	struct io_mgr *hio_mgr = NULL;	/* IO manager handle */

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(resource_info != NULL);
	DBC_REQUIRE(resource_info_size >= sizeof(struct dsp_resourceinfo));

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	switch (resource_type) {
	case DSP_RESOURCE_DYNDARAM:
	case DSP_RESOURCE_DYNSARAM:
	case DSP_RESOURCE_DYNEXTERNAL:
	case DSP_RESOURCE_DYNSRAM:
		status = dev_get_node_manager(p_proc_object->dev_obj,
					      &hnode_mgr);
		if (!hnode_mgr) {
			status = -EFAULT;
			goto func_end;
		}

		status = node_get_nldr_obj(hnode_mgr, &nldr_obj);
		if (!status) {
			status = nldr_get_rmm_manager(nldr_obj, &rmm);
			if (rmm) {
				if (!rmm_stat(rmm,
					      (enum dsp_memtype)resource_type,
					      (struct dsp_memstat *)
					      &(resource_info->result.
						mem_stat)))
					status = -EINVAL;
			} else {
				status = -EFAULT;
			}
		}
		break;
	case DSP_RESOURCE_PROCLOAD:
		status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
		if (hio_mgr)
			status =
			    p_proc_object->intf_fxns->
			    io_get_proc_load(hio_mgr,
					     (struct dsp_procloadstat *)
					     &(resource_info->result.
					       proc_load_stat));
		else
			status = -EFAULT;
		break;
	default:
		status = -EPERM;
		break;
	}
func_end:
	return status;
}
/*
 *  ======== proc_exit ========
 *  Purpose:
 *      Decrement reference count, and free resources when reference count is
 *      0.
 */
void proc_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;

	DBC_ENSURE(refs >= 0);
}
/*
 *  ======== proc_get_dev_object ========
 *  Purpose:
 *      Return the Dev Object handle for a given Processor.
 */
int proc_get_dev_object(void *hprocessor,
			struct dev_object **device_obj)
{
	int status = -EPERM;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(device_obj != NULL);

	if (p_proc_object) {
		*device_obj = p_proc_object->dev_obj;
		status = 0;
	} else {
		*device_obj = NULL;
		status = -EFAULT;
	}

	DBC_ENSURE((!status && *device_obj != NULL) ||
		   (status && *device_obj == NULL));

	return status;
}
/*
 *  ======== proc_get_state ========
 *  Purpose:
 *      Report the state of the specified DSP processor.
 */
int proc_get_state(void *hprocessor,
		   struct dsp_processorstate *proc_state_obj,
		   u32 state_info_size)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	int brd_status;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(proc_state_obj != NULL);
	DBC_REQUIRE(state_info_size >= sizeof(struct dsp_processorstate));

	if (p_proc_object) {
		/* First, retrieve BRD state information */
		status = (*p_proc_object->intf_fxns->brd_status)
		    (p_proc_object->bridge_context, &brd_status);
		if (!status) {
			switch (brd_status) {
			case BRD_STOPPED:
				proc_state_obj->proc_state = PROC_STOPPED;
				break;
			case BRD_SLEEP_TRANSITION:
			case BRD_DSP_HIBERNATION:
				/* Fall through */
			case BRD_RUNNING:
				proc_state_obj->proc_state = PROC_RUNNING;
				break;
			case BRD_LOADED:
				proc_state_obj->proc_state = PROC_LOADED;
				break;
			case BRD_ERROR:
				proc_state_obj->proc_state = PROC_ERROR;
				break;
			default:
				proc_state_obj->proc_state = 0xFF;
				status = -EPERM;
				break;
			}
		}
	} else {
		status = -EFAULT;
	}
	dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n",
		__func__, status, proc_state_obj->proc_state);
	return status;
}
/*
 *  ======== proc_get_trace ========
 *  Purpose:
 *      Retrieve the current contents of the trace buffer, located on the
 *      Processor. Predefined symbols for the trace buffer must have been
 *      configured into the DSP executable.
 *  Details:
 *      We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a
 *      trace buffer, only. Treat it as an undocumented feature.
 *      This call is destructive, meaning the processor is placed in the
 *      monitor state as a result of this function.
 */
int proc_get_trace(void *hprocessor, u8 *pbuf, u32 max_size)
{
	int status;

	status = -ENOSYS;
	return status;
}
/*
 *  ======== proc_init ========
 *  Purpose:
 *      Initialize PROC's private state, keeping a reference count on each call
 */
bool proc_init(void)
{
	bool ret = true;

	DBC_REQUIRE(refs >= 0);

	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	return ret;
}
/*
 *  ======== proc_load ========
 *  Purpose:
 *      Reset a processor and load a new base program image.
 *      This will be an OEM-only function, and not part of the DSP/BIOS Bridge
 *      application developer's API.
 */
int proc_load(void *hprocessor, const s32 argc_index,
	      const char **user_args, const char **user_envp)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct io_mgr *hio_mgr;		/* IO manager handle */
	struct msg_mgr *hmsg_mgr;
	struct cod_manager *cod_mgr;	/* Code manager handle */
	char *pargv0;			/* temp argv[0] ptr */
	char **new_envp;		/* Updated envp[] array. */
	char sz_proc_id[MAXPROCIDLEN];	/* Size of "PROC_ID=<n>" */
	s32 envp_elems;			/* Num elements in envp[]. */
	s32 cnew_envp;			/* "  " in new_envp[] */
	s32 nproc_id = 0;		/* Anticipate MP version. */
	struct dcd_manager *hdcd_handle;
	struct dmm_object *dmm_mgr;
	u32 dw_ext_end;
	u32 proc_id;
	int brd_state;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	struct timeval tv1;
	struct timeval tv2;
#endif

#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(argc_index > 0);
	DBC_REQUIRE(user_args != NULL);

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv1);
#endif
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EPERM;
		goto func_end;
	}
	status = proc_stop(hprocessor);
	if (status)
		goto func_end;

	/* Place the board in the monitor state. */
	status = proc_monitor(hprocessor);
	if (status)
		goto func_end;

	/* Save ptr to original argv[0]. */
	pargv0 = (char *)user_args[0];
	/* Prepend "PROC_ID=<nproc_id>" to envp array for target. */
	envp_elems = get_envp_count((char **)user_envp);
	cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2));
	new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL);
	if (new_envp) {
		status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID,
				  nproc_id);
		if (status == -1) {
			dev_dbg(bridge, "%s: Proc ID string overflow\n",
				__func__);
			status = -EPERM;
		} else {
			new_envp =
			    prepend_envp(new_envp, (char **)user_envp,
					 envp_elems, cnew_envp, sz_proc_id);
			/* Get the DCD Handle */
			status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
						    (u32 *) &hdcd_handle);
			if (!status) {
				/*  Before proceeding with new load,
				 *  check if a previously registered COFF
				 *  exists.
				 *  If yes, unregister nodes in previously
				 *  registered COFF.  If any error occurred,
				 *  set previously registered COFF to NULL. */
				if (p_proc_object->last_coff != NULL) {
					status =
					    dcd_auto_unregister(hdcd_handle,
								p_proc_object->
								last_coff);
					/* Regardless of auto unregister status,
					 *  free previously allocated
					 *  memory. */
					kfree(p_proc_object->last_coff);
					p_proc_object->last_coff = NULL;
				}
			}
			/* On success, do cod_open_base() */
			if (!status)
				status = cod_open_base(cod_mgr,
						       (char *)user_args[0],
						       COD_SYMB);
		}
	} else {
		status = -ENOMEM;
	}
	if (!status) {
		/* Auto-register data base */
		/* Get the DCD Handle */
		status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
					    (u32 *) &hdcd_handle);
		if (!status) {
			/*  Auto register nodes in specified COFF
			 *  file.  If registration did not fail,
			 *  (status = 0 or -EACCES)
			 *  save the name of the COFF file for
			 *  de-registration in the future. */
			status =
			    dcd_auto_register(hdcd_handle,
					      (char *)user_args[0]);
			if (status == -EACCES)
				status = 0;

			if (status) {
				status = -EPERM;
			} else {
				DBC_ASSERT(p_proc_object->last_coff ==
					   NULL);
				/* Allocate memory for pszLastCoff */
				p_proc_object->last_coff =
						kzalloc((strlen(user_args[0]) +
						1), GFP_KERNEL);
				/* If memory allocated, save COFF file name */
				if (p_proc_object->last_coff) {
					strncpy(p_proc_object->last_coff,
						(char *)user_args[0],
						(strlen((char *)user_args[0]) +
						 1));
				}
			}
		}
	}
	/* Update shared memory address and size */
	if (!status) {
		/*  Create the message manager. This must be done
		 *  before calling the IOOnLoaded function. */
		dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
		if (!hmsg_mgr) {
			status = msg_create(&hmsg_mgr, p_proc_object->dev_obj,
					    (msg_onexit) node_on_exit);
			DBC_ASSERT(!status);
			dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr);
		}
	}
	if (!status) {
		/* Set the Device object's message manager */
		status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
		if (hio_mgr)
			status = (*p_proc_object->intf_fxns->io_on_loaded)
								(hio_mgr);
		else
			status = -EFAULT;
	}
	if (!status) {
		/* Now, attempt to load an exec: */

		/* Boost the OPP level to Maximum level supported by baseport */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]);
#endif
		status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
				       dev_brd_write_fxn,
				       p_proc_object->dev_obj, NULL);
		if (status) {
			if (status == -EBADF) {
				dev_dbg(bridge, "%s: Failure to Load the EXE\n",
					__func__);
			}
			if (status == -ESPIPE) {
				pr_err("%s: Couldn't parse the file\n",
				       __func__);
			}
		}
		/* Requesting the lowest opp supported */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif
	}
	if (!status) {
		/* Update the Processor status to loaded */
		status = (*p_proc_object->intf_fxns->brd_set_state)
		    (p_proc_object->bridge_context, BRD_LOADED);
		if (!status) {
			p_proc_object->proc_state = PROC_LOADED;
			if (p_proc_object->ntfy_obj)
				proc_notify_clients(p_proc_object,
						    DSP_PROCESSORSTATECHANGE);
		}
	}
	if (!status) {
		status = proc_get_processor_id(hprocessor, &proc_id);
		if (proc_id == DSP_UNIT) {
			/*  Use all available DSP address space after EXTMEM
			 *  for DMM. */
			if (!status)
				status = cod_get_sym_value(cod_mgr, EXTEND,
							   &dw_ext_end);

			/* Reset DMM structs and add an initial free chunk */
			if (!status) {
				status =
				    dev_get_dmm_mgr(p_proc_object->dev_obj,
						    &dmm_mgr);
				if (dmm_mgr) {
					/* Set dw_ext_end to DMM START u8
					 * address */
					dw_ext_end =
					    (dw_ext_end + 1) * DSPWORDSIZE;
					/* DMM memory is from EXT_END */
					status = dmm_create_tables(dmm_mgr,
								   dw_ext_end,
								   DMMPOOLSIZE);
				} else {
					status = -EFAULT;
				}
			}
		}
	}
	/* Restore the original argv[0] */
	kfree(new_envp);
	user_args[0] = pargv0;
	if (!status) {
		if (!((*p_proc_object->intf_fxns->brd_status)
				(p_proc_object->bridge_context, &brd_state))) {
			pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
			kfree(drv_datap->base_img);
			drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
						      GFP_KERNEL);
			if (drv_datap->base_img)
				strncpy(drv_datap->base_img, pargv0,
					strlen(pargv0) + 1);
			else
				status = -ENOMEM;
			DBC_ASSERT(brd_state == BRD_LOADED);
		}
	}

func_end:
	if (status) {
		pr_err("%s: Processor failed to load\n", __func__);
		proc_stop(p_proc_object);
	}
	DBC_ENSURE((!status
		    && p_proc_object->proc_state == PROC_LOADED)
		   || status);
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv2);
	if (tv2.tv_usec < tv1.tv_usec) {
		tv2.tv_usec += 1000000;
		tv2.tv_sec--;
	}
	dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__,
		tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec);
#endif
	return status;
}
/*
 *  ======== proc_map ========
 *  Purpose:
 *      Maps a MPU buffer to DSP address space.
 */
int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
	     void *req_addr, void **pp_map_addr, u32 ul_map_attr,
	     struct process_context *pr_ctxt)
{
	u32 va_align;
	u32 pa_align;
	struct dmm_object *dmm_mgr;
	u32 size_align;
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct dmm_map_object *map_obj;
	u32 tmp_addr = 0;

#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
	if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
		if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) ||
		    !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) {
			pr_err("%s: not aligned: 0x%x (%d)\n", __func__,
			       (u32)pmpu_addr, ul_size);
			return -EFAULT;
		}
	}
#endif

	/* Calculate the page-aligned PA, VA and size */
	va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K);
	pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K);
	size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align,
				   PG_SIZE4K);

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Critical section */
	mutex_lock(&proc_lock);
	dmm_get_handle(p_proc_object, &dmm_mgr);
	if (dmm_mgr)
		status = dmm_map_memory(dmm_mgr, va_align, size_align);
	else
		status = -EFAULT;

	/* Add mapping to the page tables. */
	if (!status) {
		/* Mapped address = MSB of VA | LSB of PA */
		tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
		/* mapped memory resource tracking */
		map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
					   size_align);
		if (!map_obj)
			status = -ENOMEM;
		else
			status = (*p_proc_object->intf_fxns->brd_mem_map)
			    (p_proc_object->bridge_context, pa_align, va_align,
			     size_align, ul_map_attr, map_obj->pages);
	}
	if (!status) {
		/* Mapped address = MSB of VA | LSB of PA */
		*pp_map_addr = (void *) tmp_addr;
	} else {
		remove_mapping_information(pr_ctxt, tmp_addr, size_align);
		dmm_un_map_memory(dmm_mgr, va_align, &size_align);
	}
	mutex_unlock(&proc_lock);

func_end:
	dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, "
		"req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, "
		"pa_align %x, size_align %x status 0x%x\n", __func__,
		hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr,
		pp_map_addr, va_align, pa_align, size_align, status);

	return status;
}
/*
 *  ======== proc_register_notify ========
 *  Purpose:
 *      Register to be notified of specific processor events.
 */
int proc_register_notify(void *hprocessor, u32 event_mask,
			 u32 notify_type, struct dsp_notification
			 *hnotification)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct deh_mgr *hdeh_mgr;

	DBC_REQUIRE(hnotification != NULL);
	DBC_REQUIRE(refs > 0);

	/* Check processor handle */
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Check if event mask is a valid processor related event */
	if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH |
			DSP_PROCESSORDETACH | DSP_PROCESSORRESTART |
			DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR |
			DSP_WDTOVERFLOW))
		status = -EINVAL;

	/* Check if notify type is valid */
	if (notify_type != DSP_SIGNALEVENT)
		status = -EINVAL;

	if (!status) {
		/* If event mask is not DSP_SYSERROR, DSP_MMUFAULT,
		 * or DSP_PWRERROR then register event immediately. */
		if (event_mask &
		    ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR |
		      DSP_WDTOVERFLOW)) {
			status = ntfy_register(p_proc_object->ntfy_obj,
					       hnotification, event_mask,
					       notify_type);
			/* Special case alert, special case alert!
			 * If we're trying to *deregister* (i.e. event_mask
			 * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification,
			 * we have to deregister with the DEH manager.
			 * There's no way to know, based on event_mask which
			 * manager the notification event was registered with,
			 * so if we're trying to deregister and ntfy_register
			 * failed, we'll give the deh manager a shot.
			 */
			if ((event_mask == 0) && status) {
				status =
				    dev_get_deh_mgr(p_proc_object->dev_obj,
						    &hdeh_mgr);
				status =
				    bridge_deh_register_notify(hdeh_mgr,
							       event_mask,
							       notify_type,
							       hnotification);
			}
		} else {
			status = dev_get_deh_mgr(p_proc_object->dev_obj,
						 &hdeh_mgr);
			status =
			    bridge_deh_register_notify(hdeh_mgr,
						       event_mask,
						       notify_type,
						       hnotification);
		}
	}
func_end:
	return status;
}
/*
 *  ======== proc_reserve_memory ========
 *  Purpose:
 *      Reserve a virtually contiguous region of DSP address space.
 */
int proc_reserve_memory(void *hprocessor, u32 ul_size,
			void **pp_rsv_addr,
			struct process_context *pr_ctxt)
{
	struct dmm_object *dmm_mgr;
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct dmm_rsv_object *rsv_obj;

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}

	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = -EFAULT;
		goto func_end;
	}

	status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
	if (status != 0)
		goto func_end;

	/*
	 * A successful reserve should be followed by insertion of rsv_obj
	 * into dmm_rsv_list, so that reserved memory resource tracking
	 * remains uniform.
	 */
	rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
	if (rsv_obj) {
		rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
		spin_lock(&pr_ctxt->dmm_rsv_lock);
		list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
		spin_unlock(&pr_ctxt->dmm_rsv_lock);
	}

func_end:
	dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
		"status 0x%x\n", __func__, hprocessor,
		ul_size, pp_rsv_addr, status);
	return status;
}
/*
 *  ======== proc_start ========
 *  Purpose:
 *      Start a processor running.
 */
int proc_start(void *hprocessor)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct cod_manager *cod_mgr;	/* Code manager handle */
	u32 dw_dsp_addr;	/* Loaded code's entry point. */
	int brd_state;

	DBC_REQUIRE(refs > 0);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Call the bridge_brd_start */
	if (p_proc_object->proc_state != PROC_LOADED) {
		status = -EBADR;
		goto func_end;
	}
	status = dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EFAULT;
		goto func_cont;
	}

	status = cod_get_entry(cod_mgr, &dw_dsp_addr);
	if (status)
		goto func_cont;

	status = (*p_proc_object->intf_fxns->brd_start)
	    (p_proc_object->bridge_context, dw_dsp_addr);
	if (status)
		goto func_cont;

	/* Call dev_create2 */
	status = dev_create2(p_proc_object->dev_obj);
	if (!status) {
		p_proc_object->proc_state = PROC_RUNNING;
		/* Deep sleep switches off the peripheral clocks.
		 * we just put the DSP CPU in idle in the idle loop.
		 * so there is no need to send a command to DSP */

		if (p_proc_object->ntfy_obj) {
			proc_notify_clients(p_proc_object,
					    DSP_PROCESSORSTATECHANGE);
		}
	} else {
		/* Failed to Create Node Manager and DISP Object
		 * Stop the Processor from running. Put it in STOPPED State */
		(void)(*p_proc_object->intf_fxns->
		       brd_stop) (p_proc_object->bridge_context);
		p_proc_object->proc_state = PROC_STOPPED;
	}
func_cont:
	if (!status) {
		if (!((*p_proc_object->intf_fxns->brd_status)
				(p_proc_object->bridge_context, &brd_state))) {
			pr_info("%s: dsp in running state\n", __func__);
			DBC_ASSERT(brd_state != BRD_HIBERNATION);
		}
	} else {
		pr_err("%s: Failed to start the dsp\n", __func__);
		proc_stop(p_proc_object);
	}

func_end:
	DBC_ENSURE((!status && p_proc_object->proc_state ==
		    PROC_RUNNING) || status);
	return status;
}
/*
 *  ======== proc_stop ========
 *  Purpose:
 *      Stop a processor running.
 */
int proc_stop(void *hprocessor)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct msg_mgr *hmsg_mgr;
	struct node_mgr *hnode_mgr;
	void *hnode;
	u32 node_tab_size = 1;
	u32 num_nodes = 0;
	u32 nodes_allocated = 0;
	int brd_state;

	DBC_REQUIRE(refs > 0);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* check if there are any running nodes */
	status = dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr);
	if (!status && hnode_mgr) {
		status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
					 &num_nodes, &nodes_allocated);
		if ((status == -EINVAL) || (nodes_allocated > 0)) {
			pr_err("%s: Can't stop device, active nodes = %d\n",
			       __func__, nodes_allocated);
			return -EBADR;
		}
	}
	/* Call the bridge_brd_stop */
	/* It is OK to stop a device that doesn't have nodes OR not started */
	status =
	    (*p_proc_object->intf_fxns->
	     brd_stop) (p_proc_object->bridge_context);
	if (!status) {
		dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
		p_proc_object->proc_state = PROC_STOPPED;
		/* Destroy the Node Manager, msg_ctrl Manager */
		if (!(dev_destroy2(p_proc_object->dev_obj))) {
			/* Destroy the msg_ctrl by calling msg_delete */
			dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
			if (hmsg_mgr) {
				msg_delete(hmsg_mgr);
				dev_set_msg_mgr(p_proc_object->dev_obj, NULL);
			}
			if (!((*p_proc_object->
			       intf_fxns->brd_status) (p_proc_object->
						       bridge_context,
						       &brd_state)))
				DBC_ASSERT(brd_state == BRD_STOPPED);
		}
	} else {
		pr_err("%s: Failed to stop the processor\n", __func__);
	}
func_end:
	return status;
}
/*
 *  ======== proc_un_map ========
 *  Purpose:
 *      Removes a MPU buffer mapping from the DSP address space.
 */
int proc_un_map(void *hprocessor, void *map_addr,
		struct process_context *pr_ctxt)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct dmm_object *dmm_mgr;
	u32 va_align;
	u32 size_align;

	va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}

	status = dmm_get_handle(hprocessor, &dmm_mgr);
	if (!dmm_mgr) {
		status = -EFAULT;
		goto func_end;
	}

	/* Critical section */
	mutex_lock(&proc_lock);
	/*
	 * Update DMM structures. Get the size to unmap.
	 * This function returns error if the VA is not mapped
	 */
	status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
	/* Remove mapping from the page tables. */
	if (!status) {
		status = (*p_proc_object->intf_fxns->brd_mem_un_map)
		    (p_proc_object->bridge_context, va_align, size_align);
	}

	if (status)
		goto unmap_failed;

	/*
	 * A successful unmap should be followed by removal of map_obj
	 * from dmm_map_list, so that mapped memory resource tracking
	 * remains uniform.
	 */
	remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);

unmap_failed:
	mutex_unlock(&proc_lock);

func_end:
	dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
		__func__, hprocessor, map_addr, status);
	return status;
}
/*
 *  ======== proc_un_reserve_memory ========
 *  Purpose:
 *      Frees a previously reserved region of DSP address space.
 */
int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
			   struct process_context *pr_ctxt)
{
	struct dmm_object *dmm_mgr;
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct dmm_rsv_object *rsv_obj;

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}

	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = -EFAULT;
		goto func_end;
	}

	status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
	if (status != 0)
		goto func_end;

	/*
	 * A successful unreserve should be followed by removal of rsv_obj
	 * from dmm_rsv_list, so that reserved memory resource tracking
	 * remains uniform.
	 */
	spin_lock(&pr_ctxt->dmm_rsv_lock);
	list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
		if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
			list_del(&rsv_obj->link);
			kfree(rsv_obj);
			break;
		}
	}
	spin_unlock(&pr_ctxt->dmm_rsv_lock);

func_end:
	dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
		__func__, hprocessor, prsv_addr, status);
	return status;
}
/*
 *  ======== proc_monitor ========
 *  Purpose:
 *      Place the Processor in Monitor State. This is an internal
 *      function and a requirement before Processor is loaded.
 *      This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor.
 *      In dev_destroy2 we delete the node manager.
 *  Parameters:
 *      p_proc_object:  Pointer to Processor Object
 *  Returns:
 *      0:              Processor placed in monitor mode.
 *      !0:             Failed to place processor in monitor mode.
 *  Requires:
 *      Valid Processor Handle
 *  Ensures:
 *      Success:        ProcObject state is PROC_IDLE
 */
static int proc_monitor(struct proc_object *proc_obj)
{
	int status = -EPERM;
	struct msg_mgr *hmsg_mgr;
	int brd_state;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(proc_obj);

	/* This is needed only when Device is loaded when it is
	 * already 'ACTIVE' */
	/* Destroy the Node Manager, msg_ctrl Manager */
	if (!dev_destroy2(proc_obj->dev_obj)) {
		/* Destroy the msg_ctrl by calling msg_delete */
		dev_get_msg_mgr(proc_obj->dev_obj, &hmsg_mgr);
		if (hmsg_mgr) {
			msg_delete(hmsg_mgr);
			dev_set_msg_mgr(proc_obj->dev_obj, NULL);
		}
	}
	/* Place the Board in the Monitor State */
	if (!((*proc_obj->intf_fxns->brd_monitor)
			(proc_obj->bridge_context))) {
		status = 0;
		if (!((*proc_obj->intf_fxns->brd_status)
				(proc_obj->bridge_context, &brd_state)))
			DBC_ASSERT(brd_state == BRD_IDLE);
	}

	DBC_ENSURE((!status && brd_state == BRD_IDLE) ||
		   status);
	return status;
}
/*
 *  ======== get_envp_count ========
 *  Purpose:
 *      Return the number of elements in the envp array, including the
 *      terminating NULL element.
 */
static s32 get_envp_count(char **envp)
{
	s32 ret = 0;

	if (envp) {
		while (*envp++)
			ret++;

		ret += 1;	/* Include the terminating NULL in the count. */
	}

	return ret;
}
/*
 *  ======== prepend_envp ========
 *  Purpose:
 *      Prepend an environment variable=value pair to the new envp array, and
 *      copy in the existing var=value pairs in the old envp array.
 */
static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
			   s32 cnew_envp, char *sz_var)
{
	char **pp_envp = new_envp;

	DBC_REQUIRE(new_envp);

	/* Prepend new environ var=value string */
	*new_envp++ = sz_var;

	/* Copy user's environment into our own. */
	while (envp_elems--)
		*new_envp++ = *envp++;

	/* Ensure NULL terminates the new environment strings array. */
	if (envp_elems == 0)
		*new_envp = NULL;

	return pp_envp;
}
/*
 *  ======== proc_notify_clients ========
 *  Purpose:
 *      Notify the processor clients of events.
 */
int proc_notify_clients(void *proc, u32 events)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)proc;

	DBC_REQUIRE(p_proc_object);
	DBC_REQUIRE(is_valid_proc_event(events));
	DBC_REQUIRE(refs > 0);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}

	ntfy_notify(p_proc_object->ntfy_obj, events);
func_end:
	return status;
}
/*
 *  ======== proc_notify_all_clients ========
 *  Purpose:
 *      Notify the processor clients of events. This includes notifying all
 *      clients attached to a particular DSP.
 */
int proc_notify_all_clients(void *proc, u32 events)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)proc;

	DBC_REQUIRE(is_valid_proc_event(events));
	DBC_REQUIRE(refs > 0);

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}

	dev_notify_clients(p_proc_object->dev_obj, events);

func_end:
	return status;
}
/*
 *  ======== proc_get_processor_id ========
 *  Purpose:
 *      Retrieves the processor ID.
 */
int proc_get_processor_id(void *proc, u32 *proc_id)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)proc;

	if (p_proc_object)
		*proc_id = p_proc_object->processor_id;
	else
		status = -EFAULT;

	return status;
}