Avoid beyond bounds copy while caching ACL
[zen-stable.git] / drivers / staging / tidspbridge / rmgr / node.c
blob5dadaa445ad90fd0003ad13f03803fa59b97e86d
1 /*
2 * node.c
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * DSP/BIOS Bridge Node Manager.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 #include <linux/types.h>
20 #include <linux/bitmap.h>
21 #include <linux/list.h>
23 /* ----------------------------------- Host OS */
24 #include <dspbridge/host_os.h>
26 /* ----------------------------------- DSP/BIOS Bridge */
27 #include <dspbridge/dbdefs.h>
29 /* ----------------------------------- Trace & Debug */
30 #include <dspbridge/dbc.h>
32 /* ----------------------------------- OS Adaptation Layer */
33 #include <dspbridge/memdefs.h>
34 #include <dspbridge/proc.h>
35 #include <dspbridge/strm.h>
36 #include <dspbridge/sync.h>
37 #include <dspbridge/ntfy.h>
39 /* ----------------------------------- Platform Manager */
40 #include <dspbridge/cmm.h>
41 #include <dspbridge/cod.h>
42 #include <dspbridge/dev.h>
43 #include <dspbridge/msg.h>
45 /* ----------------------------------- Resource Manager */
46 #include <dspbridge/dbdcd.h>
47 #include <dspbridge/disp.h>
48 #include <dspbridge/rms_sh.h>
50 /* ----------------------------------- Link Driver */
51 #include <dspbridge/dspdefs.h>
52 #include <dspbridge/dspioctl.h>
54 /* ----------------------------------- Others */
55 #include <dspbridge/uuidutil.h>
57 /* ----------------------------------- This */
58 #include <dspbridge/nodepriv.h>
59 #include <dspbridge/node.h>
60 #include <dspbridge/dmm.h>
62 /* Static/Dynamic Loader includes */
63 #include <dspbridge/dbll.h>
64 #include <dspbridge/nldr.h>
66 #include <dspbridge/drv.h>
67 #include <dspbridge/resourcecleanup.h>
68 #include <_tiomap.h>
70 #include <dspbridge/dspdeh.h>
/* Device-name prefixes used when building stream device names in
 * node_connect() ("/host<id>", "/dbpipe<id>"). */
72 #define HOSTPREFIX "/host"
73 #define PIPEPREFIX "/dbpipe"
75 #define MAX_INPUTS(h) \
76 ((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
77 #define MAX_OUTPUTS(h) \
78 ((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)
80 #define NODE_GET_PRIORITY(h) ((h)->prio)
81 #define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
82 #define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)
84 #define MAXPIPES 100 /* Max # of /pipe connections (CSL limit) */
85 #define MAXDEVSUFFIXLEN 2 /* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */
87 #define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
88 #define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)
90 #define MAXDEVNAMELEN 32 /* dsp_ndbprops.ac_name size */
91 #define CREATEPHASE 1
92 #define EXECUTEPHASE 2
93 #define DELETEPHASE 3
95 /* Define default STRM parameters */
97 * TBD: Put in header file, make global DSP_STRMATTRS with defaults,
98 * or make defaults configurable.
100 #define DEFAULTBUFSIZE 32
101 #define DEFAULTNBUFS 2
102 #define DEFAULTSEGID 0
103 #define DEFAULTALIGNMENT 0
104 #define DEFAULTTIMEOUT 10000
/* Indices into node_mgr.fxn_addrs[] for the RMS server functions
 * (e.g. fxn_addrs[RMSCHANGENODEPRIORITY] in node_change_priority()). */
106 #define RMSQUERYSERVER 0
107 #define RMSCONFIGURESERVER 1
108 #define RMSCREATENODE 2
109 #define RMSEXECUTENODE 3
110 #define RMSDELETENODE 4
111 #define RMSCHANGENODEPRIORITY 5
112 #define RMSREADMEMORY 6
113 #define RMSWRITEMEMORY 7
114 #define RMSCOPY 8
115 #define MAXTIMEOUT 2000
/* Size of node_mgr.fxn_addrs[]; must cover every RMS* index above. */
117 #define NUMRMSFXNS 9
119 #define PWR_TIMEOUT 500 /* default PWR timeout in msec */
121 #define STACKSEGLABEL "L1DSRAM_HEAP" /* Label for DSP Stack Segment Addr */
124 * ======== node_mgr ========
 * Per-device node manager: tracks every node allocated on the DSP
 * (node_list), owns the pipe/channel allocation bitmaps and the RMS
 * function address table.  Mutations are serialized by node_mgr_lock.
126 struct node_mgr {
127 struct dev_object *dev_obj; /* Device object */
128 /* Function interface to Bridge driver */
129 struct bridge_drv_interface *intf_fxns;
130 struct dcd_manager *dcd_mgr; /* Proc/Node data manager */
131 struct disp_object *disp_obj; /* Node dispatcher */
132 struct list_head node_list; /* List of all allocated nodes */
133 u32 num_nodes; /* Number of nodes in node_list */
134 u32 num_created; /* Number of nodes *created* on DSP */
135 DECLARE_BITMAP(pipe_map, MAXPIPES); /* Pipe connection bitmap */
136 DECLARE_BITMAP(pipe_done_map, MAXPIPES); /* Pipes that are half free */
137 /* Channel allocation bitmap */
138 DECLARE_BITMAP(chnl_map, CHNL_MAXCHANNELS);
139 /* DMA Channel allocation bitmap */
140 DECLARE_BITMAP(dma_chnl_map, CHNL_MAXCHANNELS);
141 /* Zero-Copy Channel alloc bitmap */
142 DECLARE_BITMAP(zc_chnl_map, CHNL_MAXCHANNELS);
143 struct ntfy_object *ntfy_obj; /* Manages registered notifications */
144 struct mutex node_mgr_lock; /* For critical sections */
145 u32 fxn_addrs[NUMRMSFXNS]; /* RMS function addresses */
146 struct msg_mgr *msg_mgr_obj; /* Used to create per-node msg queues */
148 /* Processor properties needed by Node Dispatcher */
149 u32 num_chnls; /* Total number of channels */
150 u32 chnl_offset; /* Offset of chnl ids rsvd for RMS */
151 u32 chnl_buf_size; /* Buffer size for data to RMS */
152 int proc_family; /* eg, 5000 */
153 int proc_type; /* eg, 5510 */
154 u32 dsp_word_size; /* Size of DSP word on host bytes */
155 u32 dsp_data_mau_size; /* Size of DSP data MAU */
156 u32 dsp_mau_size; /* Size of MAU */
157 s32 min_pri; /* Minimum runtime priority for node */
158 s32 max_pri; /* Maximum runtime priority for node */
160 struct strm_mgr *strm_mgr_obj; /* STRM manager */
162 /* Loader properties */
163 struct nldr_object *nldr_obj; /* Handle to loader */
164 struct node_ldr_fxns nldr_fxns; /* Handle to loader functions */
165 bool loader_init; /* Loader Init function succeeded? */
169 * ======== connecttype ========
 * Kind of connection behind a stream endpoint; set by node_connect().
171 enum connecttype {
172 NOTCONNECTED = 0,
173 NODECONNECT, /* node <-> node via a /dbpipe<id> pipe */
174 HOSTCONNECT, /* node <-> GPP via a /host<id> channel */
175 DEVICECONNECT, /* node <-> device node stream */
179 * ======== stream_chnl ========
 * One stream endpoint of a node: how it is connected and to what id.
181 struct stream_chnl {
182 enum connecttype type; /* Type of stream connection */
183 u32 dev_id; /* pipe or channel id */
187 * ======== node_object ========
 * Per-node state: identity, priority, stream connections, create-phase
 * arguments, messaging and loader handles.  Allocated by
 * node_allocate() and linked into node_mgr.node_list.
189 struct node_object {
190 struct list_head list_elem;
191 struct node_mgr *node_mgr; /* The manager of this node */
192 struct proc_object *processor; /* Back pointer to processor */
193 struct dsp_uuid node_uuid; /* Node's ID */
194 s32 prio; /* Node's current priority */
195 u32 timeout; /* Timeout for blocking NODE calls */
196 u32 heap_size; /* Heap Size */
197 u32 dsp_heap_virt_addr; /* DSP-side heap virtual address */
198 u32 gpp_heap_virt_addr; /* GPP-side heap virtual address */
199 enum node_type ntype; /* Type of node: message, task, etc */
200 enum node_state node_state; /* NODE_ALLOCATED, NODE_CREATED, ... */
201 u32 num_inputs; /* Current number of inputs */
202 u32 num_outputs; /* Current number of outputs */
203 u32 max_input_index; /* Current max input stream index */
204 u32 max_output_index; /* Current max output stream index */
205 struct stream_chnl *inputs; /* Node's input streams */
206 struct stream_chnl *outputs; /* Node's output streams */
207 struct node_createargs create_args; /* Args for node create func */
208 nodeenv node_env; /* Environment returned by RMS */
209 struct dcd_genericobj dcd_props; /* Node properties from DCD */
210 struct dsp_cbdata *args; /* Optional args to pass to node */
211 struct ntfy_object *ntfy_obj; /* Manages registered notifications */
212 char *str_dev_name; /* device name, if device node */
213 struct sync_object *sync_done; /* Synchronize node_terminate */
214 s32 exit_status; /* execute function return status */
216 /* Information needed for node_get_attr() */
217 void *device_owner; /* If dev node, task that owns it */
218 u32 num_gpp_inputs; /* Current # of from GPP streams */
219 u32 num_gpp_outputs; /* Current # of to GPP streams */
220 /* Current stream connections */
221 struct dsp_streamconnect *stream_connect;
223 /* Message queue */
224 struct msg_queue *msg_queue_obj;
226 /* These fields used for SM messaging */
227 struct cmm_xlatorobject *xlator; /* Node's SM addr translator */
229 /* Handle to pass to dynamic loader */
230 struct nldr_nodeobject *nldr_node_obj;
231 bool loaded; /* Code is (dynamically) loaded */
232 bool phase_split; /* Phases split in many libs or ovly */
236 /* Default buffer attributes */
/* Used by node_alloc_msg_buf() when the caller passes pattr == NULL:
 * single SM segment (id 1), no extra alignment. */
237 static struct dsp_bufferattr node_dfltbufattrs = {
238 .cb_struct = 0,
239 .segment_id = 1,
240 .buf_alignment = 0,
/* Forward declarations of file-local helpers. */
243 static void delete_node(struct node_object *hnode,
244 struct process_context *pr_ctxt);
245 static void delete_node_mgr(struct node_mgr *hnode_mgr);
246 static void fill_stream_connect(struct node_object *node1,
247 struct node_object *node2, u32 stream1,
248 u32 stream2);
249 static void fill_stream_def(struct node_object *hnode,
250 struct node_strmdef *pstrm_def,
251 struct dsp_strmattr *pattrs);
252 static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
253 static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
254 u32 phase);
255 static int get_node_props(struct dcd_manager *hdcd_mgr,
256 struct node_object *hnode,
257 const struct dsp_uuid *node_uuid,
258 struct dcd_genericobj *dcd_prop);
259 static int get_proc_props(struct node_mgr *hnode_mgr,
260 struct dev_object *hdev_obj);
261 static int get_rms_fxns(struct node_mgr *hnode_mgr);
262 static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
263 u32 ul_num_bytes, u32 mem_space);
264 static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
265 u32 ul_num_bytes, u32 mem_space);
267 static u32 refs; /* module reference count */
269 /* Dynamic loader functions. */
/* Dispatch table handed to the node manager; node_allocate() calls
 * through it (nldr_fxns.allocate(), .get_fxn_addr()). */
270 static struct node_ldr_fxns nldr_fxns = {
271 nldr_allocate,
272 nldr_create,
273 nldr_delete,
274 nldr_exit,
275 nldr_get_fxn_addr,
276 nldr_init,
277 nldr_load,
278 nldr_unload,
281 enum node_state node_get_state(void *hnode)
283 struct node_object *pnode = (struct node_object *)hnode;
284 if (!pnode)
285 return -1;
286 return pnode->node_state;
290 * ======== node_allocate ========
291 * Purpose:
292 * Allocate GPP resources to manage a node on the DSP.
 *  Parameters:
 *      hprocessor:  Processor the node will run on; must resolve to
 *                   DSP_UNIT and not be in PROC_ERROR state.
 *      node_uuid:   UUID used to look up node properties in the DCD
 *                   database.
 *      pargs:       Optional create-phase argument block; copied into
 *                   the node's message args.
 *      attr_in:     Optional attributes (priority, timeout, user GPP
 *                   heap); overrides the DCD defaults.
 *      noderes:     Out: node resource object wrapping the new node.
 *      pr_ctxt:     Process context used for resource tracking/cleanup.
 *  Returns:
 *      0 on success; negative errno (-EPERM, -EFAULT, -EDOM, -EINVAL,
 *      -ENOMEM) on failure.  On failure the partially built node is
 *      torn down via delete_node().
294 int node_allocate(struct proc_object *hprocessor,
295 const struct dsp_uuid *node_uuid,
296 const struct dsp_cbdata *pargs,
297 const struct dsp_nodeattrin *attr_in,
298 struct node_res_object **noderes,
299 struct process_context *pr_ctxt)
301 struct node_mgr *hnode_mgr;
302 struct dev_object *hdev_obj;
303 struct node_object *pnode = NULL;
304 enum node_type node_type = NODE_TASK;
305 struct node_msgargs *pmsg_args;
306 struct node_taskargs *ptask_args;
307 u32 num_streams;
308 struct bridge_drv_interface *intf_fxns;
309 int status = 0;
310 struct cmm_object *hcmm_mgr = NULL; /* Shared memory manager hndl */
311 u32 proc_id;
312 u32 pul_value;
313 u32 dynext_base;
314 u32 off_set = 0;
315 u32 ul_stack_seg_addr, ul_stack_seg_val;
316 u32 ul_gpp_mem_base;
317 struct cfg_hostres *host_res;
318 struct bridge_dev_context *pbridge_context;
319 u32 mapped_addr = 0;
320 u32 map_attrs = 0x0;
321 struct dsp_processorstate proc_state;
322 #ifdef DSP_DMM_DEBUG
323 struct dmm_object *dmm_mgr;
324 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
325 #endif
327 void *node_res;
329 DBC_REQUIRE(refs > 0);
330 DBC_REQUIRE(hprocessor != NULL);
331 DBC_REQUIRE(noderes != NULL);
332 DBC_REQUIRE(node_uuid != NULL);
334 *noderes = NULL;
/* Nodes can only be allocated on the DSP unit. */
336 status = proc_get_processor_id(hprocessor, &proc_id);
338 if (proc_id != DSP_UNIT)
339 goto func_end;
341 status = proc_get_dev_object(hprocessor, &hdev_obj);
342 if (!status) {
343 status = dev_get_node_manager(hdev_obj, &hnode_mgr);
344 if (hnode_mgr == NULL)
345 status = -EPERM;
349 if (status)
350 goto func_end;
352 status = dev_get_bridge_context(hdev_obj, &pbridge_context);
353 if (!pbridge_context) {
354 status = -EFAULT;
355 goto func_end;
358 status = proc_get_state(hprocessor, &proc_state,
359 sizeof(struct dsp_processorstate));
360 if (status)
361 goto func_end;
362 /* If processor is in error state then don't attempt
363 to send the message */
364 if (proc_state.proc_state == PROC_ERROR) {
365 status = -EPERM;
366 goto func_end;
369 /* Assuming that 0 is not a valid function address */
370 if (hnode_mgr->fxn_addrs[0] == 0) {
371 /* No RMS on target - we currently can't handle this */
372 pr_err("%s: Failed, no RMS in base image\n", __func__);
373 status = -EPERM;
374 } else {
375 /* Validate attr_in fields, if non-NULL */
376 if (attr_in) {
377 /* Check if attr_in->prio is within range */
378 if (attr_in->prio < hnode_mgr->min_pri ||
379 attr_in->prio > hnode_mgr->max_pri)
380 status = -EDOM;
383 /* Allocate node object and fill in */
384 if (status)
385 goto func_end;
387 pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
388 if (pnode == NULL) {
389 status = -ENOMEM;
390 goto func_end;
392 pnode->node_mgr = hnode_mgr;
393 /* This critical section protects get_node_props */
394 mutex_lock(&hnode_mgr->node_mgr_lock);
396 /* Get dsp_ndbprops from node database */
397 status = get_node_props(hnode_mgr->dcd_mgr, pnode, node_uuid,
398 &(pnode->dcd_props));
399 if (status)
400 goto func_cont;
402 pnode->node_uuid = *node_uuid;
403 pnode->processor = hprocessor;
404 pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
405 pnode->timeout = pnode->dcd_props.obj_data.node_obj.ndb_props.timeout;
406 pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;
408 /* Currently only C64 DSP builds support Node Dynamic * heaps */
409 /* Allocate memory for node heap */
410 pnode->create_args.asa.task_arg_obj.heap_size = 0;
411 pnode->create_args.asa.task_arg_obj.dsp_heap_addr = 0;
412 pnode->create_args.asa.task_arg_obj.dsp_heap_res_addr = 0;
413 pnode->create_args.asa.task_arg_obj.gpp_heap_addr = 0;
414 if (!attr_in)
415 goto func_cont;
417 /* Check if we have a user allocated node heap */
418 if (!(attr_in->pgpp_virt_addr))
419 goto func_cont;
421 /* check for page aligned Heap size */
422 if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
423 pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
424 __func__, attr_in->heap_size);
425 status = -EINVAL;
426 } else {
427 pnode->create_args.asa.task_arg_obj.heap_size =
428 attr_in->heap_size;
429 pnode->create_args.asa.task_arg_obj.gpp_heap_addr =
430 (u32) attr_in->pgpp_virt_addr;
432 if (status)
433 goto func_cont;
/* Reserve DSP virtual space for the heap plus one guard page. */
435 status = proc_reserve_memory(hprocessor,
436 pnode->create_args.asa.task_arg_obj.
437 heap_size + PAGE_SIZE,
438 (void **)&(pnode->create_args.asa.
439 task_arg_obj.dsp_heap_res_addr),
440 pr_ctxt);
441 if (status) {
442 pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
443 __func__, status);
444 goto func_cont;
446 #ifdef DSP_DMM_DEBUG
447 status = dmm_get_handle(p_proc_object, &dmm_mgr);
448 if (!dmm_mgr) {
449 status = DSP_EHANDLE;
450 goto func_cont;
453 dmm_mem_map_dump(dmm_mgr);
454 #endif
456 map_attrs |= DSP_MAPLITTLEENDIAN;
457 map_attrs |= DSP_MAPELEMSIZE32;
458 map_attrs |= DSP_MAPVIRTUALADDR;
459 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
460 pnode->create_args.asa.task_arg_obj.heap_size,
461 (void *)pnode->create_args.asa.task_arg_obj.
462 dsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
463 pr_ctxt);
464 if (status)
465 pr_err("%s: Failed to map memory for Heap: 0x%x\n",
466 __func__, status);
467 else
468 pnode->create_args.asa.task_arg_obj.dsp_heap_addr =
469 (u32) mapped_addr;
471 func_cont:
472 mutex_unlock(&hnode_mgr->node_mgr_lock);
473 if (attr_in != NULL) {
474 /* Overrides of NBD properties */
475 pnode->timeout = attr_in->timeout;
476 pnode->prio = attr_in->prio;
478 /* Create object to manage notifications */
479 if (!status) {
480 pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
481 GFP_KERNEL);
482 if (pnode->ntfy_obj)
483 ntfy_init(pnode->ntfy_obj);
484 else
485 status = -ENOMEM;
488 if (!status) {
489 node_type = node_get_type(pnode);
490 /* Allocate dsp_streamconnect array for device, task, and
491 * dais socket nodes. */
492 if (node_type != NODE_MESSAGE) {
493 num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
494 pnode->stream_connect = kzalloc(num_streams *
495 sizeof(struct dsp_streamconnect),
496 GFP_KERNEL);
497 if (num_streams > 0 && pnode->stream_connect == NULL)
498 status = -ENOMEM;
501 if (!status && (node_type == NODE_TASK ||
502 node_type == NODE_DAISSOCKET)) {
503 /* Allocate arrays for maintaining stream connections */
504 pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
505 sizeof(struct stream_chnl), GFP_KERNEL);
506 pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
507 sizeof(struct stream_chnl), GFP_KERNEL);
508 ptask_args = &(pnode->create_args.asa.task_arg_obj);
509 ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
510 sizeof(struct node_strmdef),
511 GFP_KERNEL);
512 ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
513 sizeof(struct node_strmdef),
514 GFP_KERNEL);
515 if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
516 ptask_args->strm_in_def
517 == NULL))
518 || (MAX_OUTPUTS(pnode) > 0
519 && (pnode->outputs == NULL
520 || ptask_args->strm_out_def == NULL)))
521 status = -ENOMEM;
524 if (!status && (node_type != NODE_DEVICE)) {
525 /* Create an event that will be posted when RMS_EXIT is
526 * received. */
527 pnode->sync_done = kzalloc(sizeof(struct sync_object),
528 GFP_KERNEL);
529 if (pnode->sync_done)
530 sync_init_event(pnode->sync_done);
531 else
532 status = -ENOMEM;
534 if (!status) {
535 /*Get the shared mem mgr for this nodes dev object */
536 status = cmm_get_handle(hprocessor, &hcmm_mgr);
537 if (!status) {
538 /* Allocate a SM addr translator for this node
539 * w/ deflt attr */
540 status = cmm_xlator_create(&pnode->xlator,
541 hcmm_mgr, NULL);
544 if (!status) {
545 /* Fill in message args */
546 if ((pargs != NULL) && (pargs->cb_data > 0)) {
547 pmsg_args =
548 &(pnode->create_args.asa.node_msg_args);
549 pmsg_args->pdata = kzalloc(pargs->cb_data,
550 GFP_KERNEL);
551 if (pmsg_args->pdata == NULL) {
552 status = -ENOMEM;
553 } else {
554 pmsg_args->arg_length = pargs->cb_data;
555 memcpy(pmsg_args->pdata,
556 pargs->node_data,
557 pargs->cb_data);
563 if (!status && node_type != NODE_DEVICE) {
564 /* Create a message queue for this node */
565 intf_fxns = hnode_mgr->intf_fxns;
566 status =
567 (*intf_fxns->msg_create_queue) (hnode_mgr->msg_mgr_obj,
568 &pnode->msg_queue_obj,
570 pnode->create_args.asa.
571 node_msg_args.max_msgs,
572 pnode);
575 if (!status) {
576 /* Create object for dynamic loading */
578 status = hnode_mgr->nldr_fxns.allocate(hnode_mgr->nldr_obj,
579 (void *)pnode,
580 &pnode->dcd_props.
581 obj_data.node_obj,
582 &pnode->
583 nldr_node_obj,
584 &pnode->phase_split);
587 /* Compare value read from Node Properties and check if it is same as
588 * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
589 * GPP Address, Read the value in that address and override the
590 * stack_seg value in task args */
/* NOTE(review): if stack_seg_name is an embedded array rather than a
 * pointer, this != NULL test is always true -- confirm the field's
 * declaration in the ndb_props definition. */
591 if (!status &&
592 (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
593 stack_seg_name != NULL) {
594 if (strcmp((char *)
595 pnode->dcd_props.obj_data.node_obj.ndb_props.
596 stack_seg_name, STACKSEGLABEL) == 0) {
597 status =
598 hnode_mgr->nldr_fxns.
599 get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
600 &dynext_base);
601 if (status)
602 pr_err("%s: Failed to get addr for DYNEXT_BEG"
603 " status = 0x%x\n", __func__, status);
605 status =
606 hnode_mgr->nldr_fxns.
607 get_fxn_addr(pnode->nldr_node_obj,
608 "L1DSRAM_HEAP", &pul_value);
610 if (status)
611 pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
612 " status = 0x%x\n", __func__, status);
614 host_res = pbridge_context->resources;
615 if (!host_res)
616 status = -EPERM;
618 if (status) {
619 pr_err("%s: Failed to get host resource, status"
620 " = 0x%x\n", __func__, status);
621 goto func_end;
624 ul_gpp_mem_base = (u32) host_res->mem_base[1];
625 off_set = pul_value - dynext_base;
626 ul_stack_seg_addr = ul_gpp_mem_base + off_set;
/* NOTE(review): readl() is handed a plain u32 computed from
 * mem_base[1]; this assumes the region is already ioremapped and that
 * the address fits in 32 bits -- confirm against the bridge resource
 * setup. */
627 ul_stack_seg_val = readl(ul_stack_seg_addr);
629 dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
630 " 0x%x\n", __func__, ul_stack_seg_val,
631 ul_stack_seg_addr);
633 pnode->create_args.asa.task_arg_obj.stack_seg =
634 ul_stack_seg_val;
639 if (!status) {
640 /* Add the node to the node manager's list of allocated
641 * nodes. */
642 NODE_SET_STATE(pnode, NODE_ALLOCATED);
644 mutex_lock(&hnode_mgr->node_mgr_lock);
646 list_add_tail(&pnode->list_elem, &hnode_mgr->node_list);
647 ++(hnode_mgr->num_nodes);
649 /* Exit critical section */
650 mutex_unlock(&hnode_mgr->node_mgr_lock);
652 /* Preset this to assume phases are split
653 * (for overlay and dll) */
654 pnode->phase_split = true;
656 /* Notify all clients registered for DSP_NODESTATECHANGE. */
657 proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
658 } else {
659 /* Cleanup */
660 if (pnode)
661 delete_node(pnode, pr_ctxt);
665 if (!status) {
666 status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
667 if (status) {
668 delete_node(pnode, pr_ctxt);
669 goto func_end;
672 *noderes = (struct node_res_object *)node_res;
673 drv_proc_node_update_heap_status(node_res, true);
674 drv_proc_node_update_status(node_res, true);
676 DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
677 func_end:
678 dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
679 "node_res: %p status: 0x%x\n", __func__, hprocessor,
680 node_uuid, pargs, attr_in, noderes, status);
681 return status;
685 * ======== node_alloc_msg_buf ========
686 * Purpose:
687 * Allocates buffer for zero copy messaging.
 *  Parameters:
 *      hnode:   Node to allocate for; must not be a device node.
 *      usize:   Requested buffer size in bytes (must be > 0).
 *      pattr:   Optional buffer attributes; node_dfltbufattrs is used
 *               when NULL.
 *      pbuffer: In/out buffer pointer; receives the allocated shared
 *               memory address on the non-virtual path.
 *  Returns:
 *      0 on success; -EFAULT/-EPERM/-EBADR/-ENOMEM on failure.
689 DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
690 struct dsp_bufferattr *pattr,
691 u8 **pbuffer)
693 struct node_object *pnode = (struct node_object *)hnode;
694 int status = 0;
695 bool va_flag = false;
696 bool set_info;
697 u32 proc_id;
699 DBC_REQUIRE(refs > 0);
700 DBC_REQUIRE(pbuffer != NULL);
702 DBC_REQUIRE(usize > 0);
704 if (!pnode)
705 status = -EFAULT;
706 else if (node_get_type(pnode) == NODE_DEVICE)
707 status = -EPERM;
709 if (status)
710 goto func_end;
712 if (pattr == NULL)
713 pattr = &node_dfltbufattrs; /* set defaults */
715 status = proc_get_processor_id(pnode->processor, &proc_id);
/* NOTE(review): when proc_id != DSP_UNIT we jump out without setting
 * an error, so the caller may see status 0 with *pbuffer untouched --
 * confirm this is intended. */
716 if (proc_id != DSP_UNIT) {
717 DBC_ASSERT(NULL);
718 goto func_end;
720 /* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
721 * virt address, so set this info in this node's translator
722 * object for future ref. If MEM_GETVIRTUALSEGID then retrieve
723 * virtual address from node's translator. */
724 if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
725 (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
726 va_flag = true;
727 set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
728 true : false;
729 /* Clear mask bits */
730 pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
731 /* Set/get this node's translators virtual address base/size */
732 status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
733 pattr->segment_id, set_info);
735 if (!status && (!va_flag)) {
736 if (pattr->segment_id != 1) {
737 /* Node supports single SM segment only. */
738 status = -EBADR;
740 /* Arbitrary SM buffer alignment not supported for host side
741 * allocs, but guaranteed for the following alignment
742 * values. */
743 switch (pattr->buf_alignment) {
744 case 0:
745 case 1:
746 case 2:
747 case 4:
748 break;
749 default:
750 /* alignment value not supported */
751 status = -EPERM;
752 break;
754 if (!status) {
755 /* allocate physical buffer from seg_id in node's
756 * translator */
757 (void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
758 usize);
759 if (*pbuffer == NULL) {
760 pr_err("%s: error - Out of shared memory\n",
761 __func__);
762 status = -ENOMEM;
766 func_end:
767 return status;
771 * ======== node_change_priority ========
772 * Purpose:
773 * Change the priority of a node in the allocated state, or that is
774 * currently running or paused on the target.
776 int node_change_priority(struct node_object *hnode, s32 prio)
778 struct node_object *pnode = (struct node_object *)hnode;
779 struct node_mgr *hnode_mgr = NULL;
780 enum node_type node_type;
781 enum node_state state;
782 int status = 0;
783 u32 proc_id;
785 DBC_REQUIRE(refs > 0);
787 if (!hnode || !hnode->node_mgr) {
788 status = -EFAULT;
789 } else {
790 hnode_mgr = hnode->node_mgr;
791 node_type = node_get_type(hnode);
792 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
793 status = -EPERM;
794 else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
795 status = -EDOM;
797 if (status)
798 goto func_end;
800 /* Enter critical section */
801 mutex_lock(&hnode_mgr->node_mgr_lock);
803 state = node_get_state(hnode);
804 if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
805 NODE_SET_PRIORITY(hnode, prio);
806 } else {
807 if (state != NODE_RUNNING) {
808 status = -EBADR;
809 goto func_cont;
811 status = proc_get_processor_id(pnode->processor, &proc_id);
812 if (proc_id == DSP_UNIT) {
813 status =
814 disp_node_change_priority(hnode_mgr->disp_obj,
815 hnode,
816 hnode_mgr->fxn_addrs
817 [RMSCHANGENODEPRIORITY],
818 hnode->node_env, prio);
820 if (status >= 0)
821 NODE_SET_PRIORITY(hnode, prio);
824 func_cont:
825 /* Leave critical section */
826 mutex_unlock(&hnode_mgr->node_mgr_lock);
827 func_end:
828 return status;
832 * ======== node_connect ========
833 * Purpose:
834 * Connect two nodes on the DSP, or a node on the DSP to the GPP.
 *  Parameters:
 *      node1, stream1: Source node and its output stream index.
 *      node2, stream2: Sink node and its input stream index.
 *      pattrs:         Optional stream attributes; only
 *                      STRMMODE_PROCCOPY is accepted here.
 *      conn_param:     Optional connection parameters appended to the
 *                      device name for device-node connections.
 *  Returns:
 *      0 on success; -EFAULT/-EPERM/-EINVAL/-EBADR/-EISCONN/
 *      -ECONNREFUSED/-ENOMEM on failure.
836 int node_connect(struct node_object *node1, u32 stream1,
837 struct node_object *node2,
838 u32 stream2, struct dsp_strmattr *pattrs,
839 struct dsp_cbdata *conn_param)
841 struct node_mgr *hnode_mgr;
842 char *pstr_dev_name = NULL;
843 enum node_type node1_type = NODE_TASK;
844 enum node_type node2_type = NODE_TASK;
845 enum dsp_strmmode strm_mode;
846 struct node_strmdef *pstrm_def;
847 struct node_strmdef *input = NULL;
848 struct node_strmdef *output = NULL;
849 struct node_object *dev_node_obj;
850 struct node_object *hnode;
851 struct stream_chnl *pstream;
852 u32 pipe_id;
853 u32 chnl_id;
854 s8 chnl_mode;
855 u32 dw_length;
856 int status = 0;
857 DBC_REQUIRE(refs > 0);
859 if (!node1 || !node2)
860 return -EFAULT;
862 /* The two nodes must be on the same processor */
863 if (node1 != (struct node_object *)DSP_HGPPNODE &&
864 node2 != (struct node_object *)DSP_HGPPNODE &&
865 node1->node_mgr != node2->node_mgr)
866 return -EPERM;
868 /* Cannot connect a node to itself */
869 if (node1 == node2)
870 return -EPERM;
872 /* node_get_type() will return NODE_GPP if hnode = DSP_HGPPNODE. */
873 node1_type = node_get_type(node1);
874 node2_type = node_get_type(node2);
875 /* Check stream indices ranges */
876 if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
877 stream1 >= MAX_OUTPUTS(node1)) ||
878 (node2_type != NODE_GPP && node2_type != NODE_DEVICE &&
879 stream2 >= MAX_INPUTS(node2)))
880 return -EINVAL;
883 * Only the following types of connections are allowed:
884 * task/dais socket < == > task/dais socket
885 * task/dais socket < == > device
886 * task/dais socket < == > GPP
888 * ie, no message nodes, and at least one task or dais
889 * socket node.
891 if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
892 (node1_type != NODE_TASK &&
893 node1_type != NODE_DAISSOCKET &&
894 node2_type != NODE_TASK &&
895 node2_type != NODE_DAISSOCKET))
896 return -EPERM;
898 * Check stream mode. Default is STRMMODE_PROCCOPY.
900 if (pattrs && pattrs->strm_mode != STRMMODE_PROCCOPY)
901 return -EPERM; /* illegal stream mode */
903 if (node1_type != NODE_GPP) {
904 hnode_mgr = node1->node_mgr;
905 } else {
906 DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
907 hnode_mgr = node2->node_mgr;
910 /* Enter critical section */
911 mutex_lock(&hnode_mgr->node_mgr_lock);
913 /* Nodes must be in the allocated state */
914 if (node1_type != NODE_GPP &&
915 node_get_state(node1) != NODE_ALLOCATED) {
916 status = -EBADR;
917 goto out_unlock;
920 if (node2_type != NODE_GPP &&
921 node_get_state(node2) != NODE_ALLOCATED) {
922 status = -EBADR;
923 goto out_unlock;
927 * Check that stream indices for task and dais socket nodes
928 * are not already be used. (Device nodes checked later)
930 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
931 output = &(node1->create_args.asa.
932 task_arg_obj.strm_out_def[stream1]);
933 if (output->sz_device) {
934 status = -EISCONN;
935 goto out_unlock;
939 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
940 input = &(node2->create_args.asa.
941 task_arg_obj.strm_in_def[stream2]);
942 if (input->sz_device) {
943 status = -EISCONN;
944 goto out_unlock;
948 /* Connecting two task nodes? */
949 if ((node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) &&
950 (node2_type == NODE_TASK ||
951 node2_type == NODE_DAISSOCKET)) {
952 /* Find available pipe */
953 pipe_id = find_first_zero_bit(hnode_mgr->pipe_map, MAXPIPES);
954 if (pipe_id == MAXPIPES) {
955 status = -ECONNREFUSED;
956 goto out_unlock;
958 set_bit(pipe_id, hnode_mgr->pipe_map);
959 node1->outputs[stream1].type = NODECONNECT;
960 node2->inputs[stream2].type = NODECONNECT;
961 node1->outputs[stream1].dev_id = pipe_id;
962 node2->inputs[stream2].dev_id = pipe_id;
963 output->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
964 input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
965 if (!output->sz_device || !input->sz_device) {
966 /* Undo the connection */
967 kfree(output->sz_device);
968 kfree(input->sz_device);
969 clear_bit(pipe_id, hnode_mgr->pipe_map);
970 status = -ENOMEM;
971 goto out_unlock;
973 /* Copy "/dbpipe<pipId>" name to device names */
974 sprintf(output->sz_device, "%s%d", PIPEPREFIX, pipe_id);
975 strcpy(input->sz_device, output->sz_device);
977 /* Connecting task node to host? */
978 if (node1_type == NODE_GPP || node2_type == NODE_GPP) {
979 pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
980 if (!pstr_dev_name) {
981 status = -ENOMEM;
982 goto out_unlock;
985 DBC_ASSERT((node1_type == NODE_GPP) ||
986 (node2_type == NODE_GPP));
988 chnl_mode = (node1_type == NODE_GPP) ?
989 CHNL_MODETODSP : CHNL_MODEFROMDSP;
992 * Reserve a channel id. We need to put the name "/host<id>"
993 * in the node's create_args, but the host
994 * side channel will not be opened until DSPStream_Open is
995 * called for this node.
997 strm_mode = pattrs ? pattrs->strm_mode : STRMMODE_PROCCOPY;
998 switch (strm_mode) {
999 case STRMMODE_RDMA:
1000 chnl_id = find_first_zero_bit(hnode_mgr->dma_chnl_map,
1001 CHNL_MAXCHANNELS);
1002 if (chnl_id < CHNL_MAXCHANNELS) {
1003 set_bit(chnl_id, hnode_mgr->dma_chnl_map);
1004 /* dma chans are 2nd transport chnl set
1005 * ids(e.g. 16-31) */
1006 chnl_id = chnl_id + hnode_mgr->num_chnls;
1008 break;
1009 case STRMMODE_ZEROCOPY:
1010 chnl_id = find_first_zero_bit(hnode_mgr->zc_chnl_map,
1011 CHNL_MAXCHANNELS);
1012 if (chnl_id < CHNL_MAXCHANNELS) {
1013 set_bit(chnl_id, hnode_mgr->zc_chnl_map);
1014 /* zero-copy chans are 3nd transport set
1015 * (e.g. 32-47) */
1016 chnl_id = chnl_id +
1017 (2 * hnode_mgr->num_chnls);
1019 break;
1020 case STRMMODE_PROCCOPY:
1021 chnl_id = find_first_zero_bit(hnode_mgr->chnl_map,
1022 CHNL_MAXCHANNELS);
1023 if (chnl_id < CHNL_MAXCHANNELS)
1024 set_bit(chnl_id, hnode_mgr->chnl_map);
1025 break;
1026 default:
1027 status = -EINVAL;
1028 goto out_unlock;
1030 if (chnl_id == CHNL_MAXCHANNELS) {
1031 status = -ECONNREFUSED;
1032 goto out_unlock;
1035 if (node1 == (struct node_object *)DSP_HGPPNODE) {
1036 node2->inputs[stream2].type = HOSTCONNECT;
1037 node2->inputs[stream2].dev_id = chnl_id;
1038 input->sz_device = pstr_dev_name;
1039 } else {
1040 node1->outputs[stream1].type = HOSTCONNECT;
1041 node1->outputs[stream1].dev_id = chnl_id;
1042 output->sz_device = pstr_dev_name;
1044 sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
1046 /* Connecting task node to device node? */
1047 if ((node1_type == NODE_DEVICE) || (node2_type == NODE_DEVICE)) {
1048 if (node2_type == NODE_DEVICE) {
1049 /* node1 == > device */
1050 dev_node_obj = node2;
1051 hnode = node1;
1052 pstream = &(node1->outputs[stream1]);
1053 pstrm_def = output;
1054 } else {
1055 /* device == > node2 */
1056 dev_node_obj = node1;
1057 hnode = node2;
1058 pstream = &(node2->inputs[stream2]);
1059 pstrm_def = input;
1061 /* Set up create args */
1062 pstream->type = DEVICECONNECT;
1063 dw_length = strlen(dev_node_obj->str_dev_name);
1064 if (conn_param)
1065 pstrm_def->sz_device = kzalloc(dw_length + 1 +
1066 conn_param->cb_data,
1067 GFP_KERNEL);
1068 else
1069 pstrm_def->sz_device = kzalloc(dw_length + 1,
1070 GFP_KERNEL);
1071 if (!pstrm_def->sz_device) {
1072 status = -ENOMEM;
1073 goto out_unlock;
1075 /* Copy device name */
/* strncpy with dw_length == strlen(src) copies no NUL, but the
 * kzalloc'd buffer is already zero-filled, so the result is
 * NUL-terminated. */
1076 strncpy(pstrm_def->sz_device,
1077 dev_node_obj->str_dev_name, dw_length);
1078 if (conn_param)
1079 strncat(pstrm_def->sz_device,
1080 (char *)conn_param->node_data,
1081 (u32) conn_param->cb_data);
1082 dev_node_obj->device_owner = hnode;
1084 /* Fill in create args */
1085 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
1086 node1->create_args.asa.task_arg_obj.num_outputs++;
1087 fill_stream_def(node1, output, pattrs);
1089 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
1090 node2->create_args.asa.task_arg_obj.num_inputs++;
1091 fill_stream_def(node2, input, pattrs);
1093 /* Update node1 and node2 stream_connect */
1094 if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
1095 node1->num_outputs++;
1096 if (stream1 > node1->max_output_index)
1097 node1->max_output_index = stream1;
1100 if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
1101 node2->num_inputs++;
1102 if (stream2 > node2->max_input_index)
1103 node2->max_input_index = stream2;
1106 fill_stream_connect(node1, node2, stream1, stream2);
1107 /* end of sync_enter_cs */
1108 /* Exit critical section */
1109 out_unlock:
/* kfree(NULL) is a no-op, so the pstr_dev_name check is redundant but
 * harmless; pstr_dev_name is only freed here when it was not handed
 * off to input/output->sz_device (errors occur before the handoff). */
1110 if (status && pstr_dev_name)
1111 kfree(pstr_dev_name);
1112 mutex_unlock(&hnode_mgr->node_mgr_lock);
/* NOTE(review): format string lacks a space between "%d" and
 * "pattrs", producing "...stream2: 5pattrs: ..." in the log. */
1113 dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d"
1114 "pattrs: %p status: 0x%x\n", __func__, node1,
1115 stream1, node2, stream2, pattrs, status);
1116 return status;
/*
 *  ======== node_create ========
 *  Purpose:
 *      Create a node on the DSP by remotely calling the node's create function.
 *      Loads the node's create-phase code if needed, invokes it through the
 *      dispatcher, and on success moves the node from NODE_ALLOCATED to
 *      NODE_CREATED. Returns 0 on success or a negative errno on failure.
 */
int node_create(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	struct bridge_drv_interface *intf_fxns;
	u32 ul_create_fxn;
	enum node_type node_type;
	int status = 0;
	/* status1 tracks unload/cleanup errors separately so they do not
	 * mask the primary create status */
	int status1 = 0;
	struct dsp_cbdata cb_data;
	/* 255 is a sentinel meaning "no processor id retrieved yet" */
	u32 proc_id = 255;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif

	DBC_REQUIRE(refs > 0);
	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to create
	   new node */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* create struct dsp_cbdata struct for PWR calls */
	cb_data.cb_data = PWR_TIMEOUT;
	node_type = node_get_type(hnode);
	hnode_mgr = hnode->node_mgr;
	intf_fxns = hnode_mgr->intf_fxns;
	/* Get access to node dispatcher */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Check node state */
	if (node_get_state(hnode) != NODE_ALLOCATED)
		status = -EBADR;

	if (!status)
		status = proc_get_processor_id(pnode->processor, &proc_id);

	if (status)
		goto func_cont2;

	if (proc_id != DSP_UNIT)
		goto func_cont2;

	/* Make sure streams are properly connected: every input/output index
	 * up to the recorded maximum must have been connected */
	if ((hnode->num_inputs && hnode->max_input_index >
	     hnode->num_inputs - 1) ||
	    (hnode->num_outputs && hnode->max_output_index >
	     hnode->num_outputs - 1))
		status = -ENOTCONN;

	if (!status) {
		/* If node's create function is not loaded, load it */
		/* Boost the OPP level to max level that DSP can be requested */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
#endif
		status = hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
						   NLDR_CREATE);
		/* Get address of node's create function */
		if (!status) {
			hnode->loaded = true;
			if (node_type != NODE_DEVICE) {
				status = get_fxn_address(hnode, &ul_create_fxn,
							 CREATEPHASE);
			}
		} else {
			pr_err("%s: failed to load create code: 0x%x\n",
			       __func__, status);
		}
		/* Request the lowest OPP level */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif
		/* Get address of iAlg functions, if socket node */
		if (!status) {
			if (node_type == NODE_DAISSOCKET) {
				status = hnode_mgr->nldr_fxns.get_fxn_addr
				    (hnode->nldr_node_obj,
				     hnode->dcd_props.obj_data.node_obj.
				     str_i_alg_name,
				     &hnode->create_args.asa.
				     task_arg_obj.dais_arg);
			}
		}
	}
	if (!status) {
		if (node_type != NODE_DEVICE) {
			/* Remotely invoke the node's create function via the
			 * RMS dispatcher; hnode->node_env receives the node's
			 * environment pointer on the DSP */
			status = disp_node_create(hnode_mgr->disp_obj, hnode,
						  hnode_mgr->fxn_addrs
						  [RMSCREATENODE],
						  ul_create_fxn,
						  &(hnode->create_args),
						  &(hnode->node_env));
			if (status >= 0) {
				/* Set the message queue id to the node env
				 * pointer */
				intf_fxns = hnode_mgr->intf_fxns;
				(*intf_fxns->msg_set_queue_id) (hnode->
							msg_queue_obj,
							hnode->node_env);
			}
		}
	}
	/*  Phase II/Overlays: Create, execute, delete phases  possibly in
	 *  different files/sections. */
	if (hnode->loaded && hnode->phase_split) {
		/* If create code was dynamically loaded, we can now unload
		 * it. */
		status1 = hnode_mgr->nldr_fxns.unload(hnode->nldr_node_obj,
						      NLDR_CREATE);
		hnode->loaded = false;
	}
	if (status1)
		pr_err("%s: Failed to unload create code: 0x%x\n",
		       __func__, status1);
func_cont2:
	/* Update node state and node manager state */
	if (status >= 0) {
		NODE_SET_STATE(hnode, NODE_CREATED);
		hnode_mgr->num_created++;
		goto func_cont;
	}
	if (status != -EBADR) {
		/* Put back in NODE_ALLOCATED state if error occurred */
		NODE_SET_STATE(hnode, NODE_ALLOCATED);
	}
func_cont:
	/* Free access to node dispatcher */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
	if (status >= 0) {
		proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}

	dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
		hnode, status);
	return status;
}
/*
 *  ======== node_create_mgr ========
 *  Purpose:
 *      Create a NODE Manager object.
 *      Allocates the manager, its notification object, DCD manager, node
 *      dispatcher, stream manager and dynamic loader; on any failure all
 *      partially created resources are released via delete_node_mgr().
 *      On success *node_man holds the new manager and 0 is returned.
 */
int node_create_mgr(struct node_mgr **node_man,
		    struct dev_object *hdev_obj)
{
	u32 i;
	struct node_mgr *node_mgr_obj = NULL;
	struct disp_attr disp_attr_obj;
	/* Empty base-image name passed to the DCD manager */
	char *sz_zl_file = "";
	struct nldr_attrs nldr_attrs_obj;
	int status = 0;
	u8 dev_type;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(node_man != NULL);
	DBC_REQUIRE(hdev_obj != NULL);

	*node_man = NULL;
	/* Allocate Node manager object */
	node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
	if (!node_mgr_obj)
		return -ENOMEM;

	node_mgr_obj->dev_obj = hdev_obj;

	node_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
					 GFP_KERNEL);
	if (!node_mgr_obj->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(node_mgr_obj->ntfy_obj);

	INIT_LIST_HEAD(&node_mgr_obj->node_list);

	dev_get_dev_type(hdev_obj, &dev_type);

	status = dcd_create_manager(sz_zl_file, &node_mgr_obj->dcd_mgr);
	if (status)
		goto out_err;

	status = get_proc_props(node_mgr_obj, hdev_obj);
	if (status)
		goto out_err;

	/* Create NODE Dispatcher using the channel/processor properties
	 * just read by get_proc_props() */
	disp_attr_obj.chnl_offset = node_mgr_obj->chnl_offset;
	disp_attr_obj.chnl_buf_size = node_mgr_obj->chnl_buf_size;
	disp_attr_obj.proc_family = node_mgr_obj->proc_family;
	disp_attr_obj.proc_type = node_mgr_obj->proc_type;

	status = disp_create(&node_mgr_obj->disp_obj, hdev_obj, &disp_attr_obj);
	if (status)
		goto out_err;

	/* Create a STRM Manager */
	status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
	if (status)
		goto out_err;

	dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
	/* Get msg_ctrl queue manager */
	dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
	mutex_init(&node_mgr_obj->node_mgr_lock);

	/* Block out reserved channels */
	for (i = 0; i < node_mgr_obj->chnl_offset; i++)
		set_bit(i, node_mgr_obj->chnl_map);

	/* Block out channels reserved for RMS */
	set_bit(node_mgr_obj->chnl_offset, node_mgr_obj->chnl_map);
	set_bit(node_mgr_obj->chnl_offset + 1, node_mgr_obj->chnl_map);

	/* NO RM Server on the IVA */
	if (dev_type != IVA_UNIT) {
		/* Get addresses of any RMS functions loaded */
		status = get_rms_fxns(node_mgr_obj);
		if (status)
			goto out_err;
	}

	/* Get loader functions and create loader */
	node_mgr_obj->nldr_fxns = nldr_fxns;	/* Dyn loader funcs */

	nldr_attrs_obj.ovly = ovly;
	nldr_attrs_obj.write = mem_write;
	nldr_attrs_obj.dsp_word_size = node_mgr_obj->dsp_word_size;
	nldr_attrs_obj.dsp_mau_size = node_mgr_obj->dsp_mau_size;
	node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.init();
	status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj,
						hdev_obj,
						&nldr_attrs_obj);
	if (status)
		goto out_err;

	*node_man = node_mgr_obj;

	DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man));

	return status;
out_err:
	/* delete_node_mgr() tolerates a partially initialized manager and
	 * frees only what was set up */
	delete_node_mgr(node_mgr_obj);
	return status;
}
/*
 *  ======== node_delete ========
 *  Purpose:
 *      Delete a node on the DSP by remotely calling the node's delete function.
 *      Loads the node's delete function if necessary. Free GPP side resources
 *      after node's delete function returns.
 *      Host-side resources (list entry, node resource id, node_res) are
 *      released even when the remote delete fails.
 */
int node_delete(struct node_res_object *noderes,
		struct process_context *pr_ctxt)
{
	struct node_object *pnode = noderes->node;
	struct node_mgr *hnode_mgr;
	struct proc_object *hprocessor;
	struct disp_object *disp_obj;
	u32 ul_delete_fxn;
	enum node_type node_type;
	enum node_state state;
	int status = 0;
	/* status1 collects unload errors so cleanup failures are logged
	 * without clobbering the primary delete status */
	int status1 = 0;
	struct dsp_cbdata cb_data;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;

	void *node_res = noderes;

	struct dsp_processorstate proc_state;
	DBC_REQUIRE(refs > 0);

	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}
	/* create struct dsp_cbdata struct for PWR call */
	cb_data.cb_data = PWR_TIMEOUT;
	hnode_mgr = pnode->node_mgr;
	hprocessor = pnode->processor;
	disp_obj = hnode_mgr->disp_obj;
	node_type = node_get_type(pnode);
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	state = node_get_state(pnode);
	/*  Execute delete phase code for non-device node in all cases
	 *  except when the node was only allocated. Delete phase must be
	 *  executed even if create phase was executed, but failed.
	 *  If the node environment pointer is non-NULL, the delete phase
	 *  code must be  executed. */
	if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
	    node_type != NODE_DEVICE) {
		status = proc_get_processor_id(pnode->processor, &proc_id);
		if (status)
			goto func_cont1;

		if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
			/*  If node has terminated, execute phase code will
			 *  have already been unloaded in node_on_exit(). If the
			 *  node is PAUSED, the execute phase is loaded, and it
			 *  is now ok to unload it. If the node is running, we
			 *  will unload the execute phase only after deleting
			 *  the node. */
			if (state == NODE_PAUSED && pnode->loaded &&
			    pnode->phase_split) {
				/* Ok to unload execute code as long as node
				 * is not * running */
				status1 =
				    hnode_mgr->nldr_fxns.
				    unload(pnode->nldr_node_obj,
					   NLDR_EXECUTE);
				pnode->loaded = false;
				NODE_SET_STATE(pnode, NODE_DONE);
			}
			/* Load delete phase code if not loaded or if haven't
			 * * unloaded EXECUTE phase */
			if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
			    pnode->phase_split) {
				status =
				    hnode_mgr->nldr_fxns.
				    load(pnode->nldr_node_obj, NLDR_DELETE);
				if (!status)
					pnode->loaded = true;
				else
					pr_err("%s: fail - load delete code:"
					       " 0x%x\n", __func__, status);
			}
		}
func_cont1:
		if (!status) {
			/* Unblock a thread trying to terminate the node */
			(void)sync_set_event(pnode->sync_done);
			if (proc_id == DSP_UNIT) {
				/* ul_delete_fxn = address of node's delete
				 * function */
				status = get_fxn_address(pnode, &ul_delete_fxn,
							 DELETEPHASE);
			} else if (proc_id == IVA_UNIT)
				ul_delete_fxn = (u32) pnode->node_env;
			if (!status) {
				status = proc_get_state(hprocessor,
						&proc_state,
						sizeof(struct
						       dsp_processorstate));
				if (proc_state.proc_state != PROC_ERROR) {
					/* Remotely run the node's delete
					 * phase via the RMS dispatcher */
					status =
					    disp_node_delete(disp_obj, pnode,
							     hnode_mgr->
							     fxn_addrs
							     [RMSDELETENODE],
							     ul_delete_fxn,
							     pnode->node_env);
				} else
					NODE_SET_STATE(pnode, NODE_DONE);

				/* Unload execute, if not unloaded, and delete
				 * function */
				if (state == NODE_RUNNING &&
				    pnode->phase_split) {
					status1 =
					    hnode_mgr->nldr_fxns.
					    unload(pnode->nldr_node_obj,
						   NLDR_EXECUTE);
				}
				if (status1)
					pr_err("%s: fail - unload execute code:"
					       " 0x%x\n", __func__, status1);

				status1 =
				    hnode_mgr->nldr_fxns.unload(pnode->
							nldr_node_obj,
							NLDR_DELETE);
				pnode->loaded = false;
				if (status1)
					pr_err("%s: fail - unload delete code: "
					       "0x%x\n", __func__, status1);
			}
		}
	}
	/* Free host side resources even if a failure occurred */
	/* Remove node from hnode_mgr->node_list */
	list_del(&pnode->list_elem);
	hnode_mgr->num_nodes--;
	/* Decrement count of nodes created on DSP */
	if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
					  (pnode->node_env != (u32) NULL)))
		hnode_mgr->num_created--;
	/*  Free host-side resources allocated by node_create()
	 *  delete_node() fails if SM buffers not freed by client! */
	drv_proc_node_update_status(node_res, false);
	delete_node(pnode, pr_ctxt);

	/*
	 * Release all Node resources and its context
	 */
	idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
	kfree(node_res);

	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
func_end:
	dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
	return status;
}
1551 * ======== node_delete_mgr ========
1552 * Purpose:
1553 * Delete the NODE Manager.
1555 int node_delete_mgr(struct node_mgr *hnode_mgr)
1557 DBC_REQUIRE(refs > 0);
1559 if (!hnode_mgr)
1560 return -EFAULT;
1562 delete_node_mgr(hnode_mgr);
1564 return 0;
/*
 *  ======== node_enum_nodes ========
 *  Purpose:
 *      Enumerate currently allocated nodes.
 *      Copies up to node_tab_size node handles into node_tab; *pu_allocated
 *      always receives the total node count. Returns -EINVAL (with
 *      *pu_num_nodes = 0) when the caller's table is too small.
 */
int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
		    u32 node_tab_size, u32 *pu_num_nodes,
		    u32 *pu_allocated)
{
	struct node_object *hnode;
	u32 i = 0;
	int status = 0;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
	DBC_REQUIRE(pu_num_nodes != NULL);
	DBC_REQUIRE(pu_allocated != NULL);

	if (!hnode_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	if (hnode_mgr->num_nodes > node_tab_size) {
		/* Table too small: report required size, copy nothing */
		*pu_allocated = hnode_mgr->num_nodes;
		*pu_num_nodes = 0;
		status = -EINVAL;
	} else {
		list_for_each_entry(hnode, &hnode_mgr->node_list, list_elem)
			node_tab[i++] = hnode;
		*pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
	}
	/* end of sync_enter_cs */
	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
	return status;
}
1608 * ======== node_exit ========
1609 * Purpose:
1610 * Discontinue usage of NODE module.
1612 void node_exit(void)
1614 DBC_REQUIRE(refs > 0);
1616 refs--;
1618 DBC_ENSURE(refs >= 0);
1622 * ======== node_free_msg_buf ========
1623 * Purpose:
1624 * Frees the message buffer.
1626 int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
1627 struct dsp_bufferattr *pattr)
1629 struct node_object *pnode = (struct node_object *)hnode;
1630 int status = 0;
1631 u32 proc_id;
1632 DBC_REQUIRE(refs > 0);
1633 DBC_REQUIRE(pbuffer != NULL);
1634 DBC_REQUIRE(pnode != NULL);
1635 DBC_REQUIRE(pnode->xlator != NULL);
1637 if (!hnode) {
1638 status = -EFAULT;
1639 goto func_end;
1641 status = proc_get_processor_id(pnode->processor, &proc_id);
1642 if (proc_id == DSP_UNIT) {
1643 if (!status) {
1644 if (pattr == NULL) {
1645 /* set defaults */
1646 pattr = &node_dfltbufattrs;
1648 /* Node supports single SM segment only */
1649 if (pattr->segment_id != 1)
1650 status = -EBADR;
1652 /* pbuffer is clients Va. */
1653 status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
1655 } else {
1656 DBC_ASSERT(NULL); /* BUG */
1658 func_end:
1659 return status;
/*
 *  ======== node_get_attr ========
 *  Purpose:
 *      Copy the current attributes of the specified node into a dsp_nodeattr
 *      structure.
 *      pattr must point to a buffer of at least sizeof(struct dsp_nodeattr)
 *      bytes (checked only via DBC_REQUIRE on attr_size). Returns 0 on
 *      success, -EFAULT for a NULL node handle.
 */
int node_get_attr(struct node_object *hnode,
		  struct dsp_nodeattr *pattr, u32 attr_size)
{
	struct node_mgr *hnode_mgr;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pattr != NULL);
	DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));

	if (!hnode)
		return -EFAULT;

	hnode_mgr = hnode->node_mgr;
	/*  Enter hnode_mgr critical section (since we're accessing
	 *  data that could be changed by node_change_priority() and
	 *  node_connect(). */
	mutex_lock(&hnode_mgr->node_mgr_lock);
	pattr->cb_struct = sizeof(struct dsp_nodeattr);
	/* dsp_nodeattrin */
	pattr->in_node_attr_in.cb_struct =
	    sizeof(struct dsp_nodeattrin);
	pattr->in_node_attr_in.prio = hnode->prio;
	pattr->in_node_attr_in.timeout = hnode->timeout;
	pattr->in_node_attr_in.heap_size =
	    hnode->create_args.asa.task_arg_obj.heap_size;
	pattr->in_node_attr_in.pgpp_virt_addr = (void *)
	    hnode->create_args.asa.task_arg_obj.gpp_heap_addr;
	pattr->node_attr_inputs = hnode->num_gpp_inputs;
	pattr->node_attr_outputs = hnode->num_gpp_outputs;
	/* dsp_nodeinfo */
	get_node_info(hnode, &(pattr->node_info));
	/* end of sync_enter_cs */
	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);

	return 0;
}
1706 * ======== node_get_channel_id ========
1707 * Purpose:
1708 * Get the channel index reserved for a stream connection between the
1709 * host and a node.
1711 int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
1712 u32 *chan_id)
1714 enum node_type node_type;
1715 int status = -EINVAL;
1716 DBC_REQUIRE(refs > 0);
1717 DBC_REQUIRE(dir == DSP_TONODE || dir == DSP_FROMNODE);
1718 DBC_REQUIRE(chan_id != NULL);
1720 if (!hnode) {
1721 status = -EFAULT;
1722 return status;
1724 node_type = node_get_type(hnode);
1725 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) {
1726 status = -EPERM;
1727 return status;
1729 if (dir == DSP_TONODE) {
1730 if (index < MAX_INPUTS(hnode)) {
1731 if (hnode->inputs[index].type == HOSTCONNECT) {
1732 *chan_id = hnode->inputs[index].dev_id;
1733 status = 0;
1736 } else {
1737 DBC_ASSERT(dir == DSP_FROMNODE);
1738 if (index < MAX_OUTPUTS(hnode)) {
1739 if (hnode->outputs[index].type == HOSTCONNECT) {
1740 *chan_id = hnode->outputs[index].dev_id;
1741 status = 0;
1745 return status;
/*
 *  ======== node_get_message ========
 *  Purpose:
 *      Retrieve a message from a node on the DSP. Blocks for up to utimeout
 *      while waiting for a message. If the message carries a shared-memory
 *      buffer descriptor (DSP_RMSBUFDESC), arg1/arg2 are translated from
 *      DSP word address/size to a GPP virtual address and byte size.
 */
int node_get_message(struct node_object *hnode,
		     struct dsp_msg *message, u32 utimeout)
{
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	void *tmp_buf;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(message != NULL);

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to get the
	   message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	hnode_mgr = hnode->node_mgr;
	node_type = node_get_type(hnode);
	/* Only message, task and XDAIS socket nodes have message queues */
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET) {
		status = -EPERM;
		goto func_end;
	}
	/*  This function will block unless a message is available. Since
	 *  DSPNode_RegisterNotify() allows notification when a message
	 *  is available, the system can be designed so that
	 *  DSPNode_GetMessage() is only called when a message is
	 *  available. */
	intf_fxns = hnode_mgr->intf_fxns;
	status =
	    (*intf_fxns->msg_get) (hnode->msg_queue_obj, message, utimeout);
	/* Check if message contains SM descriptor */
	if (status || !(message->cmd & DSP_RMSBUFDESC))
		goto func_end;

	/* Translate DSP byte addr to GPP Va. (two-step: DSP physical
	 * address to GPP physical, then GPP physical to virtual) */
	tmp_buf = cmm_xlator_translate(hnode->xlator,
				       (void *)(message->arg1 *
						hnode->node_mgr->
						dsp_word_size), CMM_DSPPA2PA);
	if (tmp_buf != NULL) {
		/* now convert this GPP Pa to Va */
		tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
					       CMM_PA2VA);
		if (tmp_buf != NULL) {
			/* Adjust SM size in msg */
			message->arg1 = (u32) tmp_buf;
			message->arg2 *= hnode->node_mgr->dsp_word_size;
		} else {
			status = -ESRCH;
		}
	} else {
		status = -ESRCH;
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
		hnode, message, utimeout);
	return status;
}
1827 * ======== node_get_nldr_obj ========
1829 int node_get_nldr_obj(struct node_mgr *hnode_mgr,
1830 struct nldr_object **nldr_ovlyobj)
1832 int status = 0;
1833 struct node_mgr *node_mgr_obj = hnode_mgr;
1834 DBC_REQUIRE(nldr_ovlyobj != NULL);
1836 if (!hnode_mgr)
1837 status = -EFAULT;
1838 else
1839 *nldr_ovlyobj = node_mgr_obj->nldr_obj;
1841 DBC_ENSURE(!status || (nldr_ovlyobj != NULL && *nldr_ovlyobj == NULL));
1842 return status;
1846 * ======== node_get_strm_mgr ========
1847 * Purpose:
1848 * Returns the Stream manager.
1850 int node_get_strm_mgr(struct node_object *hnode,
1851 struct strm_mgr **strm_man)
1853 int status = 0;
1855 DBC_REQUIRE(refs > 0);
1857 if (!hnode)
1858 status = -EFAULT;
1859 else
1860 *strm_man = hnode->node_mgr->strm_mgr_obj;
1862 return status;
1866 * ======== node_get_load_type ========
1868 enum nldr_loadtype node_get_load_type(struct node_object *hnode)
1870 DBC_REQUIRE(refs > 0);
1871 DBC_REQUIRE(hnode);
1872 if (!hnode) {
1873 dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
1874 return -1;
1875 } else {
1876 return hnode->dcd_props.obj_data.node_obj.load_type;
1881 * ======== node_get_timeout ========
1882 * Purpose:
1883 * Returns the timeout value for this node.
1885 u32 node_get_timeout(struct node_object *hnode)
1887 DBC_REQUIRE(refs > 0);
1888 DBC_REQUIRE(hnode);
1889 if (!hnode) {
1890 dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
1891 return 0;
1892 } else {
1893 return hnode->timeout;
1898 * ======== node_get_type ========
1899 * Purpose:
1900 * Returns the node type.
1902 enum node_type node_get_type(struct node_object *hnode)
1904 enum node_type node_type;
1906 if (hnode == (struct node_object *)DSP_HGPPNODE)
1907 node_type = NODE_GPP;
1908 else {
1909 if (!hnode)
1910 node_type = -1;
1911 else
1912 node_type = hnode->ntype;
1914 return node_type;
1918 * ======== node_init ========
1919 * Purpose:
1920 * Initialize the NODE module.
1922 bool node_init(void)
1924 DBC_REQUIRE(refs >= 0);
1926 refs++;
1928 return true;
/*
 *  ======== node_on_exit ========
 *  Purpose:
 *      Gets called when RMS_EXIT is received for a node.
 *      Records the node's exit status, unloads its execute-phase code if
 *      split-phase loading was used, unblocks node_terminate() and notifies
 *      registered clients of the state change.
 */
void node_on_exit(struct node_object *hnode, s32 node_status)
{
	if (!hnode)
		return;

	/* Set node state to done */
	NODE_SET_STATE(hnode, NODE_DONE);
	hnode->exit_status = node_status;
	if (hnode->loaded && hnode->phase_split) {
		/* unload result deliberately ignored: the node has already
		 * exited, nothing useful can be done on failure here */
		(void)hnode->node_mgr->nldr_fxns.unload(hnode->
							nldr_node_obj,
							NLDR_EXECUTE);
		hnode->loaded = false;
	}
	/* Unblock call to node_terminate */
	(void)sync_set_event(hnode->sync_done);
	/* Notify clients */
	proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
	ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
}
/*
 *  ======== node_pause ========
 *  Purpose:
 *      Suspend execution of a node currently running on the DSP by remotely
 *      lowering its priority to NODE_SUSPENDEDPRI. Only task and XDAIS
 *      socket nodes in the NODE_RUNNING state can be paused; not supported
 *      on the IVA. On success the node moves to NODE_PAUSED and clients are
 *      notified.
 */
int node_pause(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	enum node_type node_type;
	enum node_state state;
	struct node_mgr *hnode_mgr;
	int status = 0;
	u32 proc_id;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	DBC_REQUIRE(refs > 0);

	if (!hnode) {
		status = -EFAULT;
	} else {
		node_type = node_get_type(hnode);
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
	}
	if (status)
		goto func_end;

	status = proc_get_processor_id(pnode->processor, &proc_id);

	if (proc_id == IVA_UNIT)
		status = -ENOSYS;

	if (!status) {
		hnode_mgr = hnode->node_mgr;

		/* Enter critical section */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		/* Check node state */
		if (state != NODE_RUNNING)
			status = -EBADR;

		/* func_cont jumps below stay inside this block so the
		 * mutex taken above is always released */
		if (status)
			goto func_cont;
		hprocessor = hnode->processor;
		status = proc_get_state(hprocessor, &proc_state,
					sizeof(struct dsp_processorstate));
		if (status)
			goto func_cont;
		/* If processor is in error state then don't attempt
		   to send the message */
		if (proc_state.proc_state == PROC_ERROR) {
			status = -EPERM;
			goto func_cont;
		}

		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
			hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY],
			hnode->node_env, NODE_SUSPENDEDPRI);

		/* Update state */
		if (status >= 0)
			NODE_SET_STATE(hnode, NODE_PAUSED);

func_cont:
		/* End of sync_enter_cs */
		/* Leave critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
		if (status >= 0) {
			proc_notify_clients(hnode->processor,
					    DSP_NODESTATECHANGE);
			ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
		}
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
	return status;
}
/*
 *  ======== node_put_message ========
 *  Purpose:
 *      Send a message to a message node, task node, or XDAIS socket node. This
 *      function will block until the message stream can accommodate the
 *      message, or a timeout occurs.
 *      If the message carries a shared-memory buffer descriptor
 *      (DSP_RMSBUFDESC), arg1/arg2 are translated from a GPP virtual
 *      address and byte size to a DSP address and size in MAUs before
 *      the message is queued.
 */
int node_put_message(struct node_object *hnode,
		     const struct dsp_msg *pmsg, u32 utimeout)
{
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	int status = 0;
	void *tmp_buf;
	/* local copy: pmsg is const and arg1/arg2 may need rewriting */
	struct dsp_msg new_msg;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pmsg != NULL);

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in bad state then don't attempt sending the
	   message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	hnode_mgr = hnode->node_mgr;
	node_type = node_get_type(hnode);
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET)
		status = -EPERM;

	if (!status) {
		/*  Check node state. Can't send messages to a node after
		 *  we've sent the RMS_EXIT command. There is still the
		 *  possibility that node_terminate can be called after we've
		 *  checked the state. Could add another SYNC object to
		 *  prevent this (can't use node_mgr_lock, since we don't
		 *  want to block other NODE functions). However, the node may
		 *  still exit on its own, before this message is sent. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state == NODE_TERMINATING || state == NODE_DONE)
			status = -EBADR;

		/* end of sync_enter_cs */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (status)
		goto func_end;

	/* assign pmsg values to new msg */
	new_msg = *pmsg;
	/* Now, check if message contains a SM buffer descriptor */
	if (pmsg->cmd & DSP_RMSBUFDESC) {
		/* Translate GPP Va to DSP physical buf Ptr. */
		tmp_buf = cmm_xlator_translate(hnode->xlator,
					       (void *)new_msg.arg1,
					       CMM_VA2DSPPA);
		if (tmp_buf != NULL) {
			/* got translation, convert to MAUs in msg */
			if (hnode->node_mgr->dsp_word_size != 0) {
				new_msg.arg1 =
				    (u32) tmp_buf /
				    hnode->node_mgr->dsp_word_size;
				/* MAUs */
				new_msg.arg2 /= hnode->node_mgr->
				    dsp_word_size;
			} else {
				pr_err("%s: dsp_word_size is zero!\n",
				       __func__);
				status = -EPERM;	/* bad DSPWordSize */
			}
		} else {	/* failed to translate buffer address */
			status = -ESRCH;
		}
	}
	if (!status) {
		intf_fxns = hnode_mgr->intf_fxns;
		status = (*intf_fxns->msg_put) (hnode->msg_queue_obj,
						&new_msg, utimeout);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
		"status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
	return status;
}
/*
 *  ======== node_register_notify ========
 *  Purpose:
 *      Register to be notified on specific events for this node.
 *      Exactly one of DSP_NODESTATECHANGE or DSP_NODEMESSAGEREADY may be
 *      requested per call (registering both at once is rejected); state
 *      changes go to the node's ntfy object, message-ready events are
 *      forwarded to the msg_ctrl layer.
 */
int node_register_notify(struct node_object *hnode, u32 event_mask,
			 u32 notify_type,
			 struct dsp_notification *hnotification)
{
	struct bridge_drv_interface *intf_fxns;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hnotification != NULL);

	if (!hnode) {
		status = -EFAULT;
	} else {
		/* Check if event mask is a valid node related event */
		if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
			status = -EINVAL;

		/* Check if notify type is valid */
		if (notify_type != DSP_SIGNALEVENT)
			status = -EINVAL;

		/* Only one Notification can be registered at a
		 * time - Limitation */
		if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
			status = -EINVAL;

		if (!status) {
			if (event_mask == DSP_NODESTATECHANGE) {
				status = ntfy_register(hnode->ntfy_obj,
						hnotification,
						event_mask &
						DSP_NODESTATECHANGE,
						notify_type);
			} else {
				/* Send Message part of event mask to
				 * msg_ctrl */
				intf_fxns = hnode->node_mgr->intf_fxns;
				status = (*intf_fxns->msg_register_notify)
				    (hnode->msg_queue_obj,
				     event_mask & DSP_NODEMESSAGEREADY,
				     notify_type,
				     hnotification);
			}
		}
	}
	dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
		"hnotification: %p status 0x%x\n", __func__, hnode,
		event_mask, notify_type, hnotification, status);
	return status;
}
2190 * ======== node_run ========
2191 * Purpose:
2192 * Start execution of a node's execute phase, or resume execution of a node
2193 * that has been suspended (via NODE_NodePause()) on the DSP. Load the
2194 * node's execute function if necessary.
/*
 * Start (or resume) execution of a node on the DSP.
 * A NODE_CREATED node gets its execute phase loaded (if split-phase
 * and not yet loaded) and is dispatched via RMS_executeNode; a
 * NODE_PAUSED node is resumed by restoring its priority via
 * RMS_changeNodePriority.  On success the node transitions to
 * NODE_RUNNING and clients are notified after the lock is dropped.
 */
2196 int node_run(struct node_object *hnode)
2198 struct node_object *pnode = (struct node_object *)hnode;
2199 struct node_mgr *hnode_mgr;
2200 enum node_type node_type;
2201 enum node_state state;
2202 u32 ul_execute_fxn;
2203 u32 ul_fxn_addr;
2204 int status = 0;
2205 u32 proc_id;
2206 struct bridge_drv_interface *intf_fxns;
2207 struct dsp_processorstate proc_state;
2208 struct proc_object *hprocessor;
2210 DBC_REQUIRE(refs > 0);
2212 if (!hnode) {
2213 status = -EFAULT;
2214 goto func_end;
2216 hprocessor = hnode->processor;
2217 status = proc_get_state(hprocessor, &proc_state,
2218 sizeof(struct dsp_processorstate));
2219 if (status)
2220 goto func_end;
2221 /* If processor is in error state then don't attempt to run the node */
2222 if (proc_state.proc_state == PROC_ERROR) {
2223 status = -EPERM;
2224 goto func_end;
2226 node_type = node_get_type(hnode);
2227 if (node_type == NODE_DEVICE)
2228 status = -EPERM;
2229 if (status)
2230 goto func_end;
2232 hnode_mgr = hnode->node_mgr;
2233 if (!hnode_mgr) {
2234 status = -EFAULT;
2235 goto func_end;
2237 intf_fxns = hnode_mgr->intf_fxns;
2238 /* Enter critical section */
2239 mutex_lock(&hnode_mgr->node_mgr_lock);
2241 state = node_get_state(hnode);
/* Only CREATED (first run) and PAUSED (resume) nodes may be run */
2242 if (state != NODE_CREATED && state != NODE_PAUSED)
2243 status = -EBADR;
2245 if (!status)
2246 status = proc_get_processor_id(pnode->processor, &proc_id);
2248 if (status)
2249 goto func_cont1;
2251 if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
2252 goto func_cont1;
2254 if (state == NODE_CREATED) {
2255 /* If node's execute function is not loaded, load it */
2256 if (!(hnode->loaded) && hnode->phase_split) {
2257 status =
2258 hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
2259 NLDR_EXECUTE);
2260 if (!status) {
2261 hnode->loaded = true;
2262 } else {
2263 pr_err("%s: fail - load execute code: 0x%x\n",
2264 __func__, status);
2267 if (!status) {
2268 /* Get address of node's execute function */
2269 if (proc_id == IVA_UNIT)
2270 ul_execute_fxn = (u32) hnode->node_env;
2271 else {
2272 status = get_fxn_address(hnode, &ul_execute_fxn,
2273 EXECUTEPHASE);
2276 if (!status) {
2277 ul_fxn_addr = hnode_mgr->fxn_addrs[RMSEXECUTENODE];
2278 status =
2279 disp_node_run(hnode_mgr->disp_obj, hnode,
2280 ul_fxn_addr, ul_execute_fxn,
2281 hnode->node_env);
2283 } else if (state == NODE_PAUSED) {
/* Resume: re-issue the node's priority to un-pause it on the DSP */
2284 ul_fxn_addr = hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY];
2285 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
2286 ul_fxn_addr, hnode->node_env,
2287 NODE_GET_PRIORITY(hnode));
2288 } else {
2289 /* We should never get here */
2290 DBC_ASSERT(false);
2292 func_cont1:
2293 /* Update node state. */
2294 if (status >= 0)
2295 NODE_SET_STATE(hnode, NODE_RUNNING);
2296 else /* Set state back to previous value */
2297 NODE_SET_STATE(hnode, state);
2298 /*End of sync_enter_cs */
2299 /* Exit critical section */
2300 mutex_unlock(&hnode_mgr->node_mgr_lock);
/* Notify outside the lock to avoid calling back with it held */
2301 if (status >= 0) {
2302 proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
2303 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2305 func_end:
2306 dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
2307 return status;
2311 * ======== node_terminate ========
2312 * Purpose:
2313 * Signal a node running on the DSP that it should exit its execute phase
2314 * function.
/*
 * Ask a running TASK/DAIS-socket node to exit its execute phase.
 * Protocol: send RMS_EXIT, wait half the kill timeout for the node's
 * sync_done event (posted by the message callback or node_delete);
 * if that times out, escalate to RMS_KILLTASK and wait the second
 * half; if still no response, simulate a DSP exception via the DEH
 * manager.  On success *pstatus receives the node's exit status.
 */
2316 int node_terminate(struct node_object *hnode, int *pstatus)
2318 struct node_object *pnode = (struct node_object *)hnode;
2319 struct node_mgr *hnode_mgr = NULL;
2320 enum node_type node_type;
2321 struct bridge_drv_interface *intf_fxns;
2322 enum node_state state;
2323 struct dsp_msg msg, killmsg;
2324 int status = 0;
2325 u32 proc_id, kill_time_out;
2326 struct deh_mgr *hdeh_mgr;
2327 struct dsp_processorstate proc_state;
2329 DBC_REQUIRE(refs > 0);
2330 DBC_REQUIRE(pstatus != NULL);
2332 if (!hnode || !hnode->node_mgr) {
2333 status = -EFAULT;
2334 goto func_end;
2336 if (pnode->processor == NULL) {
2337 status = -EFAULT;
2338 goto func_end;
2340 status = proc_get_processor_id(pnode->processor, &proc_id);
2342 if (!status) {
2343 hnode_mgr = hnode->node_mgr;
2344 node_type = node_get_type(hnode);
2345 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
2346 status = -EPERM;
2348 if (!status) {
2349 /* Check node state */
2350 mutex_lock(&hnode_mgr->node_mgr_lock);
2351 state = node_get_state(hnode);
2352 if (state != NODE_RUNNING) {
2353 status = -EBADR;
2354 /* Set the exit status if node terminated on
2355 * its own. */
2356 if (state == NODE_DONE)
2357 *pstatus = hnode->exit_status;
2359 } else {
2360 NODE_SET_STATE(hnode, NODE_TERMINATING);
2362 /* end of sync_enter_cs */
2363 mutex_unlock(&hnode_mgr->node_mgr_lock);
2365 if (!status) {
2367 * Send exit message. Do not change state to NODE_DONE
2368 * here. That will be done in callback.
2370 status = proc_get_state(pnode->processor, &proc_state,
2371 sizeof(struct dsp_processorstate));
2372 if (status)
2373 goto func_cont;
2374 /* If processor is in error state then don't attempt to send
2375 * A kill task command */
2376 if (proc_state.proc_state == PROC_ERROR) {
2377 status = -EPERM;
2378 goto func_cont;
2381 msg.cmd = RMS_EXIT;
2382 msg.arg1 = hnode->node_env;
2383 killmsg.cmd = RMS_KILLTASK;
2384 killmsg.arg1 = hnode->node_env;
2385 intf_fxns = hnode_mgr->intf_fxns;
/* Total kill timeout is twice the node timeout, capped at MAXTIMEOUT;
 * each of the two waits below uses half of it. */
2387 if (hnode->timeout > MAXTIMEOUT)
2388 kill_time_out = MAXTIMEOUT;
2389 else
2390 kill_time_out = (hnode->timeout) * 2;
2392 status = (*intf_fxns->msg_put) (hnode->msg_queue_obj, &msg,
2393 hnode->timeout);
2394 if (status)
2395 goto func_cont;
2398 * Wait on synchronization object that will be
2399 * posted in the callback on receiving RMS_EXIT
2400 * message, or by node_delete. Check for valid hnode,
2401 * in case posted by node_delete().
2403 status = sync_wait_on_event(hnode->sync_done,
2404 kill_time_out / 2);
2405 if (status != ETIME)
2406 goto func_cont;
/* Polite exit timed out: escalate to a forced kill */
2408 status = (*intf_fxns->msg_put)(hnode->msg_queue_obj,
2409 &killmsg, hnode->timeout);
2410 if (status)
2411 goto func_cont;
2412 status = sync_wait_on_event(hnode->sync_done,
2413 kill_time_out / 2);
2414 if (status) {
2416 * Here it goes the part of the simulation of
2417 * the DSP exception.
2419 dev_get_deh_mgr(hnode_mgr->dev_obj, &hdeh_mgr);
2420 if (!hdeh_mgr)
2421 goto func_cont;
2423 bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
2426 func_cont:
2427 if (!status) {
2428 /* Enter CS before getting exit status, in case node was
2429 * deleted. */
2430 mutex_lock(&hnode_mgr->node_mgr_lock);
2431 /* Make sure node wasn't deleted while we blocked */
/* NOTE(review): hnode is a local copy of the caller's pointer, so
 * this check can never observe deletion by another thread — it is
 * effectively always true; verify intended protection mechanism. */
2432 if (!hnode) {
2433 status = -EPERM;
2434 } else {
2435 *pstatus = hnode->exit_status;
2436 dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
2437 __func__, hnode, hnode->node_env, status);
2439 mutex_unlock(&hnode_mgr->node_mgr_lock);
2440 } /*End of sync_enter_cs */
2441 func_end:
2442 return status;
2446 * ======== delete_node ========
2447 * Purpose:
2448 * Free GPP resources allocated in node_allocate() or node_connect().
/*
 * Free all GPP-side resources attached to a node: message queue,
 * sync object, stream tables, task stream definitions, mapped and
 * reserved DSP heap memory, stream-connect table, DCD phase-function
 * name strings, SM translator, NLDR node object, and finally the
 * node object itself.  Safe teardown order matters here.
 */
2450 static void delete_node(struct node_object *hnode,
2451 struct process_context *pr_ctxt)
2453 struct node_mgr *hnode_mgr;
2454 struct bridge_drv_interface *intf_fxns;
2455 u32 i;
2456 enum node_type node_type;
2457 struct stream_chnl stream;
2458 struct node_msgargs node_msg_args;
2459 struct node_taskargs task_arg_obj;
2460 #ifdef DSP_DMM_DEBUG
2461 struct dmm_object *dmm_mgr;
2462 struct proc_object *p_proc_object =
2463 (struct proc_object *)hnode->processor;
2464 #endif
2465 int status;
2466 if (!hnode)
2467 goto func_end;
2468 hnode_mgr = hnode->node_mgr;
2469 if (!hnode_mgr)
2470 goto func_end;
2472 node_type = node_get_type(hnode);
2473 if (node_type != NODE_DEVICE) {
/* Only non-device nodes own message args and a msg_ctrl queue */
2474 node_msg_args = hnode->create_args.asa.node_msg_args;
2475 kfree(node_msg_args.pdata);
2477 /* Free msg_ctrl queue */
2478 if (hnode->msg_queue_obj) {
2479 intf_fxns = hnode_mgr->intf_fxns;
2480 (*intf_fxns->msg_delete_queue) (hnode->
2481 msg_queue_obj);
2482 hnode->msg_queue_obj = NULL;
2485 kfree(hnode->sync_done);
2487 /* Free all stream info */
2488 if (hnode->inputs) {
/* Release channel/pipe ids for each input stream before freeing */
2489 for (i = 0; i < MAX_INPUTS(hnode); i++) {
2490 stream = hnode->inputs[i];
2491 free_stream(hnode_mgr, stream);
2493 kfree(hnode->inputs);
2494 hnode->inputs = NULL;
2496 if (hnode->outputs) {
2497 for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
2498 stream = hnode->outputs[i];
2499 free_stream(hnode_mgr, stream);
2501 kfree(hnode->outputs);
2502 hnode->outputs = NULL;
2504 task_arg_obj = hnode->create_args.asa.task_arg_obj;
2505 if (task_arg_obj.strm_in_def) {
2506 for (i = 0; i < MAX_INPUTS(hnode); i++) {
2507 kfree(task_arg_obj.strm_in_def[i].sz_device);
2508 task_arg_obj.strm_in_def[i].sz_device = NULL;
2510 kfree(task_arg_obj.strm_in_def);
2511 task_arg_obj.strm_in_def = NULL;
2513 if (task_arg_obj.strm_out_def) {
2514 for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
2515 kfree(task_arg_obj.strm_out_def[i].sz_device);
2516 task_arg_obj.strm_out_def[i].sz_device = NULL;
2518 kfree(task_arg_obj.strm_out_def);
2519 task_arg_obj.strm_out_def = NULL;
2521 if (task_arg_obj.dsp_heap_res_addr) {
/* Unmap, then un-reserve, the node's DSP heap region */
2522 status = proc_un_map(hnode->processor, (void *)
2523 task_arg_obj.dsp_heap_addr,
2524 pr_ctxt);
2526 status = proc_un_reserve_memory(hnode->processor,
2527 (void *)
2528 task_arg_obj.
2529 dsp_heap_res_addr,
2530 pr_ctxt);
2531 #ifdef DSP_DMM_DEBUG
2532 status = dmm_get_handle(p_proc_object, &dmm_mgr);
2533 if (dmm_mgr)
2534 dmm_mem_map_dump(dmm_mgr);
2535 else
2536 status = DSP_EHANDLE;
2537 #endif
2540 if (node_type != NODE_MESSAGE) {
2541 kfree(hnode->stream_connect);
2542 hnode->stream_connect = NULL;
2544 kfree(hnode->str_dev_name);
2545 hnode->str_dev_name = NULL;
2547 if (hnode->ntfy_obj) {
2548 ntfy_delete(hnode->ntfy_obj);
2549 kfree(hnode->ntfy_obj);
2550 hnode->ntfy_obj = NULL;
2553 /* These were allocated in dcd_get_object_def (via node_allocate) */
2554 kfree(hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn);
2555 hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn = NULL;
2557 kfree(hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn);
2558 hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn = NULL;
2560 kfree(hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn);
2561 hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn = NULL;
2563 kfree(hnode->dcd_props.obj_data.node_obj.str_i_alg_name);
2564 hnode->dcd_props.obj_data.node_obj.str_i_alg_name = NULL;
2566 /* Free all SM address translator resources */
2567 kfree(hnode->xlator);
2568 kfree(hnode->nldr_node_obj);
2569 hnode->nldr_node_obj = NULL;
2570 hnode->node_mgr = NULL;
2571 kfree(hnode);
2572 hnode = NULL;
2573 func_end:
2574 return;
2578 * ======== delete_node_mgr ========
2579 * Purpose:
2580 * Frees the node manager.
/*
 * Tear down the node manager: destroy the DCD manager, delete every
 * node still on the list, then release the lock, notification object,
 * dispatcher, stream manager, and loader before freeing the manager.
 */
2582 static void delete_node_mgr(struct node_mgr *hnode_mgr)
2584 struct node_object *hnode, *tmp;
2586 if (hnode_mgr) {
2587 /* Free resources */
2588 if (hnode_mgr->dcd_mgr)
2589 dcd_destroy_manager(hnode_mgr->dcd_mgr);
2591 /* Remove any elements remaining in lists */
2592 list_for_each_entry_safe(hnode, tmp, &hnode_mgr->node_list,
2593 list_elem) {
2594 list_del(&hnode->list_elem);
2595 delete_node(hnode, NULL);
2597 mutex_destroy(&hnode_mgr->node_mgr_lock);
2598 if (hnode_mgr->ntfy_obj) {
2599 ntfy_delete(hnode_mgr->ntfy_obj);
2600 kfree(hnode_mgr->ntfy_obj);
2603 if (hnode_mgr->disp_obj)
2604 disp_delete(hnode_mgr->disp_obj);
2606 if (hnode_mgr->strm_mgr_obj)
2607 strm_delete(hnode_mgr->strm_mgr_obj);
2609 /* Delete the loader */
2610 if (hnode_mgr->nldr_obj)
2611 hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj);
2613 if (hnode_mgr->loader_init)
2614 hnode_mgr->nldr_fxns.exit();
2616 kfree(hnode_mgr);
2621 * ======== fill_stream_connect ========
2622 * Purpose:
2623 * Fills stream information.
/*
 * Record a new stream connection in both endpoints' stream_connect
 * tables.  Either endpoint may be the GPP pseudo-node (DSP_HGPPNODE)
 * or a device node; device nodes and the GPP keep no table entry.
 * Assumes the caller has already incremented num_inputs/num_outputs
 * for the new stream (the index used is count - 1).
 */
2625 static void fill_stream_connect(struct node_object *node1,
2626 struct node_object *node2,
2627 u32 stream1, u32 stream2)
2629 u32 strm_index;
2630 struct dsp_streamconnect *strm1 = NULL;
2631 struct dsp_streamconnect *strm2 = NULL;
2632 enum node_type node1_type = NODE_TASK;
2633 enum node_type node2_type = NODE_TASK;
2635 node1_type = node_get_type(node1);
2636 node2_type = node_get_type(node2);
2637 if (node1 != (struct node_object *)DSP_HGPPNODE) {
2639 if (node1_type != NODE_DEVICE) {
2640 strm_index = node1->num_inputs +
2641 node1->num_outputs - 1;
2642 strm1 = &(node1->stream_connect[strm_index]);
2643 strm1->cb_struct = sizeof(struct dsp_streamconnect);
2644 strm1->this_node_stream_index = stream1;
2647 if (node2 != (struct node_object *)DSP_HGPPNODE) {
2648 /* NODE == > NODE */
/* strm1 is only dereferenced under the same NODE_DEVICE guard
 * that initialized it above, so it cannot be NULL here. */
2649 if (node1_type != NODE_DEVICE) {
2650 strm1->connected_node = node2;
2651 strm1->ui_connected_node_id = node2->node_uuid;
2652 strm1->connected_node_stream_index = stream2;
2653 strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
2655 if (node2_type != NODE_DEVICE) {
2656 strm_index = node2->num_inputs +
2657 node2->num_outputs - 1;
2658 strm2 = &(node2->stream_connect[strm_index]);
2659 strm2->cb_struct =
2660 sizeof(struct dsp_streamconnect);
2661 strm2->this_node_stream_index = stream2;
2662 strm2->connected_node = node1;
2663 strm2->ui_connected_node_id = node1->node_uuid;
2664 strm2->connected_node_stream_index = stream1;
2665 strm2->connect_type = CONNECTTYPE_NODEINPUT;
2667 } else if (node1_type != NODE_DEVICE)
2668 strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
2669 } else {
2670 /* GPP == > NODE */
2671 DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
2672 strm_index = node2->num_inputs + node2->num_outputs - 1;
2673 strm2 = &(node2->stream_connect[strm_index]);
2674 strm2->cb_struct = sizeof(struct dsp_streamconnect);
2675 strm2->this_node_stream_index = stream2;
2676 strm2->connect_type = CONNECTTYPE_GPPINPUT;
2681 * ======== fill_stream_def ========
2682 * Purpose:
2683 * Fills Stream attributes.
2685 static void fill_stream_def(struct node_object *hnode,
2686 struct node_strmdef *pstrm_def,
2687 struct dsp_strmattr *pattrs)
2689 struct node_mgr *hnode_mgr = hnode->node_mgr;
2691 if (pattrs != NULL) {
2692 pstrm_def->num_bufs = pattrs->num_bufs;
2693 pstrm_def->buf_size =
2694 pattrs->buf_size / hnode_mgr->dsp_data_mau_size;
2695 pstrm_def->seg_id = pattrs->seg_id;
2696 pstrm_def->buf_alignment = pattrs->buf_alignment;
2697 pstrm_def->timeout = pattrs->timeout;
2698 } else {
2699 pstrm_def->num_bufs = DEFAULTNBUFS;
2700 pstrm_def->buf_size =
2701 DEFAULTBUFSIZE / hnode_mgr->dsp_data_mau_size;
2702 pstrm_def->seg_id = DEFAULTSEGID;
2703 pstrm_def->buf_alignment = DEFAULTALIGNMENT;
2704 pstrm_def->timeout = DEFAULTTIMEOUT;
2709 * ======== free_stream ========
2710 * Purpose:
2711 * Updates the channel mask and frees the pipe id.
2713 static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
2715 /* Free up the pipe id unless other node has not yet been deleted. */
2716 if (stream.type == NODECONNECT) {
2717 if (test_bit(stream.dev_id, hnode_mgr->pipe_done_map)) {
2718 /* The other node has already been deleted */
2719 clear_bit(stream.dev_id, hnode_mgr->pipe_done_map);
2720 clear_bit(stream.dev_id, hnode_mgr->pipe_map);
2721 } else {
2722 /* The other node has not been deleted yet */
2723 set_bit(stream.dev_id, hnode_mgr->pipe_done_map);
2725 } else if (stream.type == HOSTCONNECT) {
2726 if (stream.dev_id < hnode_mgr->num_chnls) {
2727 clear_bit(stream.dev_id, hnode_mgr->chnl_map);
2728 } else if (stream.dev_id < (2 * hnode_mgr->num_chnls)) {
2729 /* dsp-dma */
2730 clear_bit(stream.dev_id - (1 * hnode_mgr->num_chnls),
2731 hnode_mgr->dma_chnl_map);
2732 } else if (stream.dev_id < (3 * hnode_mgr->num_chnls)) {
2733 /* zero-copy */
2734 clear_bit(stream.dev_id - (2 * hnode_mgr->num_chnls),
2735 hnode_mgr->zc_chnl_map);
2741 * ======== get_fxn_address ========
2742 * Purpose:
2743 * Retrieves the address for create, execute or delete phase for a node.
/*
 * Look up the DSP-side address of a node's create, execute, or delete
 * phase function by resolving the phase-function name (cached from the
 * DCD) through the node loader.
 * NOTE(review): for an invalid phase the default case only asserts and
 * falls through, so get_fxn_addr is called with a NULL name — confirm
 * the loader tolerates that, or that callers never pass a bad phase.
 */
2745 static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
2746 u32 phase)
2748 char *pstr_fxn_name = NULL;
2749 struct node_mgr *hnode_mgr = hnode->node_mgr;
2750 int status = 0;
2751 DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
2752 node_get_type(hnode) == NODE_DAISSOCKET ||
2753 node_get_type(hnode) == NODE_MESSAGE);
2755 switch (phase) {
2756 case CREATEPHASE:
2757 pstr_fxn_name =
2758 hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn;
2759 break;
2760 case EXECUTEPHASE:
2761 pstr_fxn_name =
2762 hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn;
2763 break;
2764 case DELETEPHASE:
2765 pstr_fxn_name =
2766 hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn;
2767 break;
2768 default:
2769 /* Should never get here */
2770 DBC_ASSERT(false);
2771 break;
2774 status =
2775 hnode_mgr->nldr_fxns.get_fxn_addr(hnode->nldr_node_obj,
2776 pstr_fxn_name, fxn_addr);
2778 return status;
2782 * ======== get_node_info ========
2783 * Purpose:
2784 * Retrieves the node information.
2786 void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
2788 u32 i;
2790 DBC_REQUIRE(hnode);
2791 DBC_REQUIRE(node_info != NULL);
2793 node_info->cb_struct = sizeof(struct dsp_nodeinfo);
2794 node_info->nb_node_database_props =
2795 hnode->dcd_props.obj_data.node_obj.ndb_props;
2796 node_info->execution_priority = hnode->prio;
2797 node_info->device_owner = hnode->device_owner;
2798 node_info->number_streams = hnode->num_inputs + hnode->num_outputs;
2799 node_info->node_env = hnode->node_env;
2801 node_info->ns_execution_state = node_get_state(hnode);
2803 /* Copy stream connect data */
2804 for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++)
2805 node_info->sc_stream_connection[i] = hnode->stream_connect[i];
2810 * ======== get_node_props ========
2811 * Purpose:
2812 * Retrieve node properties.
/*
 * Fetch a node's properties from the DCD by UUID and cache them on
 * the node: node type, message-queue args (non-device nodes) or a
 * copy of the device name (device nodes), plus task create args for
 * TASK/DAIS-socket nodes.  Returns 0 or a negative errno.
 */
2814 static int get_node_props(struct dcd_manager *hdcd_mgr,
2815 struct node_object *hnode,
2816 const struct dsp_uuid *node_uuid,
2817 struct dcd_genericobj *dcd_prop)
2819 u32 len;
2820 struct node_msgargs *pmsg_args;
2821 struct node_taskargs *task_arg_obj;
2822 enum node_type node_type = NODE_TASK;
2823 struct dsp_ndbprops *pndb_props =
2824 &(dcd_prop->obj_data.node_obj.ndb_props);
2825 int status = 0;
2826 char sz_uuid[MAXUUIDLEN];
2828 status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
2829 DSP_DCDNODETYPE, dcd_prop);
2831 if (!status) {
2832 hnode->ntype = node_type = pndb_props->ntype;
2834 /* Create UUID value to set in registry. */
2835 uuid_uuid_to_string((struct dsp_uuid *)node_uuid, sz_uuid,
2836 MAXUUIDLEN);
2837 dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);
2839 /* Fill in message args that come from NDB */
2840 if (node_type != NODE_DEVICE) {
2841 pmsg_args = &(hnode->create_args.asa.node_msg_args);
2842 pmsg_args->seg_id =
2843 dcd_prop->obj_data.node_obj.msg_segid;
2844 pmsg_args->notify_type =
2845 dcd_prop->obj_data.node_obj.msg_notify_type;
2846 pmsg_args->max_msgs = pndb_props->message_depth;
2847 dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
2848 pmsg_args->max_msgs);
2849 } else {
2850 /* Copy device name */
2851 DBC_REQUIRE(pndb_props->ac_name);
2852 len = strlen(pndb_props->ac_name);
2853 DBC_ASSERT(len < MAXDEVNAMELEN);
/* kzalloc of len + 1 zero-fills, so the strncpy below always
 * leaves the copy NUL-terminated. */
2854 hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL);
2855 if (hnode->str_dev_name == NULL) {
2856 status = -ENOMEM;
2857 } else {
2858 strncpy(hnode->str_dev_name,
2859 pndb_props->ac_name, len);
2863 if (!status) {
2864 /* Fill in create args that come from NDB */
2865 if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
2866 task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
2867 task_arg_obj->prio = pndb_props->prio;
2868 task_arg_obj->stack_size = pndb_props->stack_size;
2869 task_arg_obj->sys_stack_size =
2870 pndb_props->sys_stack_size;
2871 task_arg_obj->stack_seg = pndb_props->stack_seg;
2872 dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
2873 "0x%x words System Stack Size: 0x%x words "
2874 "Stack Segment: 0x%x profile count : 0x%x\n",
2875 task_arg_obj->prio, task_arg_obj->stack_size,
2876 task_arg_obj->sys_stack_size,
2877 task_arg_obj->stack_seg,
2878 pndb_props->count_profiles);
2882 return status;
2886 * ======== get_proc_props ========
2887 * Purpose:
2888 * Retrieve the processor properties.
2890 static int get_proc_props(struct node_mgr *hnode_mgr,
2891 struct dev_object *hdev_obj)
2893 struct cfg_hostres *host_res;
2894 struct bridge_dev_context *pbridge_context;
2895 int status = 0;
2897 status = dev_get_bridge_context(hdev_obj, &pbridge_context);
2898 if (!pbridge_context)
2899 status = -EFAULT;
2901 if (!status) {
2902 host_res = pbridge_context->resources;
2903 if (!host_res)
2904 return -EPERM;
2905 hnode_mgr->chnl_offset = host_res->chnl_offset;
2906 hnode_mgr->chnl_buf_size = host_res->chnl_buf_size;
2907 hnode_mgr->num_chnls = host_res->num_chnls;
2910 * PROC will add an API to get dsp_processorinfo.
2911 * Fill in default values for now.
2913 /* TODO -- Instead of hard coding, take from registry */
2914 hnode_mgr->proc_family = 6000;
2915 hnode_mgr->proc_type = 6410;
2916 hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
2917 hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
2918 hnode_mgr->dsp_word_size = DSPWORDSIZE;
2919 hnode_mgr->dsp_data_mau_size = DSPWORDSIZE;
2920 hnode_mgr->dsp_mau_size = 1;
2923 return status;
2927 * ======== node_get_uuid_props ========
2928 * Purpose:
2929 * Fetch Node UUID properties from DCD/DOF file.
2931 int node_get_uuid_props(void *hprocessor,
2932 const struct dsp_uuid *node_uuid,
2933 struct dsp_ndbprops *node_props)
2935 struct node_mgr *hnode_mgr = NULL;
2936 struct dev_object *hdev_obj;
2937 int status = 0;
2938 struct dcd_nodeprops dcd_node_props;
2939 struct dsp_processorstate proc_state;
2941 DBC_REQUIRE(refs > 0);
2942 DBC_REQUIRE(hprocessor != NULL);
2943 DBC_REQUIRE(node_uuid != NULL);
2945 if (hprocessor == NULL || node_uuid == NULL) {
2946 status = -EFAULT;
2947 goto func_end;
2949 status = proc_get_state(hprocessor, &proc_state,
2950 sizeof(struct dsp_processorstate));
2951 if (status)
2952 goto func_end;
2953 /* If processor is in error state then don't attempt
2954 to send the message */
2955 if (proc_state.proc_state == PROC_ERROR) {
2956 status = -EPERM;
2957 goto func_end;
2960 status = proc_get_dev_object(hprocessor, &hdev_obj);
2961 if (hdev_obj) {
2962 status = dev_get_node_manager(hdev_obj, &hnode_mgr);
2963 if (hnode_mgr == NULL) {
2964 status = -EFAULT;
2965 goto func_end;
2970 * Enter the critical section. This is needed because
2971 * dcd_get_object_def will ultimately end up calling dbll_open/close,
2972 * which needs to be protected in order to not corrupt the zlib manager
2973 * (COD).
2975 mutex_lock(&hnode_mgr->node_mgr_lock);
2977 dcd_node_props.str_create_phase_fxn = NULL;
2978 dcd_node_props.str_execute_phase_fxn = NULL;
2979 dcd_node_props.str_delete_phase_fxn = NULL;
2980 dcd_node_props.str_i_alg_name = NULL;
2982 status = dcd_get_object_def(hnode_mgr->dcd_mgr,
2983 (struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
2984 (struct dcd_genericobj *)&dcd_node_props);
2986 if (!status) {
2987 *node_props = dcd_node_props.ndb_props;
2988 kfree(dcd_node_props.str_create_phase_fxn);
2990 kfree(dcd_node_props.str_execute_phase_fxn);
2992 kfree(dcd_node_props.str_delete_phase_fxn);
2994 kfree(dcd_node_props.str_i_alg_name);
2996 /* Leave the critical section, we're done. */
2997 mutex_unlock(&hnode_mgr->node_mgr_lock);
2998 func_end:
2999 return status;
3003 * ======== get_rms_fxns ========
3004 * Purpose:
3005 * Retrieve the RMS functions.
/*
 * Resolve the DSP addresses of all RMS server entry points into
 * hnode_mgr->fxn_addrs[].  A symbol reported as -ESPIPE (possibly
 * dynamically loadable later) is only logged; any other lookup
 * failure aborts and its status is returned.
 */
3007 static int get_rms_fxns(struct node_mgr *hnode_mgr)
3009 s32 i;
3010 struct dev_object *dev_obj = hnode_mgr->dev_obj;
3011 int status = 0;
/* Order must match the RMS* indices used for fxn_addrs[] lookups */
3013 static char *psz_fxns[NUMRMSFXNS] = {
3014 "RMS_queryServer", /* RMSQUERYSERVER */
3015 "RMS_configureServer", /* RMSCONFIGURESERVER */
3016 "RMS_createNode", /* RMSCREATENODE */
3017 "RMS_executeNode", /* RMSEXECUTENODE */
3018 "RMS_deleteNode", /* RMSDELETENODE */
3019 "RMS_changeNodePriority", /* RMSCHANGENODEPRIORITY */
3020 "RMS_readMemory", /* RMSREADMEMORY */
3021 "RMS_writeMemory", /* RMSWRITEMEMORY */
3022 "RMS_copy", /* RMSCOPY */
3025 for (i = 0; i < NUMRMSFXNS; i++) {
3026 status = dev_get_symbol(dev_obj, psz_fxns[i],
3027 &(hnode_mgr->fxn_addrs[i]));
3028 if (status) {
3029 if (status == -ESPIPE) {
3031 * May be loaded dynamically (in the future),
3032 * but return an error for now.
3034 dev_dbg(bridge, "%s: RMS function: %s currently"
3035 " not loaded\n", __func__, psz_fxns[i]);
3036 } else {
3037 dev_dbg(bridge, "%s: Symbol not found: %s "
3038 "status = 0x%x\n", __func__,
3039 psz_fxns[i], status);
3040 break;
3045 return status;
3049 * ======== ovly ========
3050 * Purpose:
3051 * Called during overlay.Sends command to RMS to copy a block of data.
/*
 * Overlay callback for the node loader: copy ul_num_bytes of DSP
 * memory from dsp_load_addr to dsp_run_addr via the bridge driver's
 * board memory-copy function.  Returns the number of bytes copied,
 * or 0 on any failure (the loader treats a short count as an error).
 */
3053 static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
3054 u32 ul_num_bytes, u32 mem_space)
3056 struct node_object *hnode = (struct node_object *)priv_ref;
3057 struct node_mgr *hnode_mgr;
3058 u32 ul_bytes = 0;
3059 u32 ul_size;
3060 u32 ul_timeout;
3061 int status = 0;
3062 struct bridge_dev_context *hbridge_context;
3063 /* Function interface to Bridge driver*/
3064 struct bridge_drv_interface *intf_fxns;
3066 DBC_REQUIRE(hnode);
3068 hnode_mgr = hnode->node_mgr;
/* NOTE(review): ul_size and ul_timeout are computed but not used in
 * the copy below — presumably kept for a legacy interface; confirm. */
3070 ul_size = ul_num_bytes / hnode_mgr->dsp_word_size;
3071 ul_timeout = hnode->timeout;
3073 /* Call new MemCopy function */
3074 intf_fxns = hnode_mgr->intf_fxns;
3075 status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
3076 if (!status) {
3077 status =
3078 (*intf_fxns->brd_mem_copy) (hbridge_context,
3079 dsp_run_addr, dsp_load_addr,
3080 ul_num_bytes, (u32) mem_space);
3081 if (!status)
3082 ul_bytes = ul_num_bytes;
3083 else
3084 pr_debug("%s: failed to copy brd memory, status 0x%x\n",
3085 __func__, status);
3086 } else {
3087 pr_debug("%s: failed to get Bridge context, status 0x%x\n",
3088 __func__, status);
3091 return ul_bytes;
3095 * ======== mem_write ========
3097 static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
3098 u32 ul_num_bytes, u32 mem_space)
3100 struct node_object *hnode = (struct node_object *)priv_ref;
3101 struct node_mgr *hnode_mgr;
3102 u16 mem_sect_type;
3103 u32 ul_timeout;
3104 int status = 0;
3105 struct bridge_dev_context *hbridge_context;
3106 /* Function interface to Bridge driver */
3107 struct bridge_drv_interface *intf_fxns;
3109 DBC_REQUIRE(hnode);
3110 DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
3112 hnode_mgr = hnode->node_mgr;
3114 ul_timeout = hnode->timeout;
3115 mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
3117 /* Call new MemWrite function */
3118 intf_fxns = hnode_mgr->intf_fxns;
3119 status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
3120 status = (*intf_fxns->brd_mem_write) (hbridge_context, pbuf,
3121 dsp_add, ul_num_bytes, mem_sect_type);
3123 return ul_num_bytes;
3126 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
3128 * ======== node_find_addr ========
3130 int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
3131 u32 offset_range, void *sym_addr_output, char *sym_name)
3133 struct node_object *node_obj;
3134 int status = -ENOENT;
3136 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
3137 (unsigned int) node_mgr,
3138 sym_addr, offset_range,
3139 (unsigned int) sym_addr_output, sym_name);
3141 list_for_each_entry(node_obj, &node_mgr->node_list, list_elem) {
3142 status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
3143 offset_range, sym_addr_output, sym_name);
3144 if (!status)
3145 break;
3148 return status;
3150 #endif