/******************************************************************************

    AudioScience HPI driver
    Copyright (C) 1997-2010  AudioScience Inc. <support@audioscience.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of version 2 of the GNU General Public License as
    published by the Free Software Foundation;

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

 Extended Message Function With Response Caching

(C) Copyright AudioScience Inc. 2002
*****************************************************************************/
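
/* This layer sits between the OS-facing driver code (e.g. hpioctl.c) and the
 * per-adapter-family HPI handlers.  It dispatches messages to the right
 * handler, caches the responses to the object "open" messages issued when an
 * adapter is created, and tracks which owner (user-mode handle) has each
 * stream open so that state can be cleaned up when that owner goes away.
 */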
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpimsginit.h"
#include "hpicmn.h"
#include "hpimsgx.h"
#include "hpidebug.h"

static struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};

static struct hpios_spinlock msgx_lock;

static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];

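/* Map a probed PCI device to the handler ("entry point") function for its
 * adapter family.  The match data comes from asihpi_pci_tbl above, whose
 * entries are generated from hpipcida.h with the handler function stored in
 * driver_data.
 */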
static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
	*pci_info)
{

	int i;

	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].vendor !=
			pci_info->pci_dev->vendor)
			continue;
		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
			&& asihpi_pci_tbl[i].device !=
			pci_info->pci_dev->device)
			continue;
		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subvendor !=
			pci_info->pci_dev->subsystem_vendor)
			continue;
		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subdevice !=
			pci_info->pci_dev->subsystem_device)
			continue;

		/* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
		   asihpi_pci_tbl[i].driver_data); */
		return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
	}

	return NULL;
}

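/* Forward a message to the handler registered for its adapter index, or
 * fail the response with HPI_ERROR_PROCESSING_MESSAGE if no adapter with
 * that index has been created yet.
 */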
static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{
	if ((phm->adapter_index < HPI_MAX_ADAPTERS)
		&& hpi_entry_points[phm->adapter_index])
		hpi_entry_points[phm->adapter_index] (phm, phr);
	else
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);
}

static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);

static void HPIMSGX__reset(u16 adapter_index);

static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);

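/* Cached-response layout: each entry pairs the generic response header with
 * the object-specific payload of an HPI response.  Byte packing is used,
 * presumably so these mirror the layout of struct hpi_response that is
 * memcpy()d into them when the adapter is prepared.
 */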
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

struct hpi_subsys_response {
	struct hpi_response_header h;
	struct hpi_subsys_res s;
};

struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

struct adapter_info {
	u16 type;
	u16 num_instreams;
	u16 num_outstreams;
};

struct asi_open_state {
	int open_flag;
	void *h_owner;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif

/* Globals */
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

/* use these to keep track of opens from user mode apps/DLLs */
static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

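/* Handle subsystem-level messages.  Most are answered locally from this
 * layer: version queries, open/close (which never propagate down the
 * chain), and driver load/unload, which set up or tear down the
 * module-wide state above.  HPI_SUBSYS_CREATE_ADAPTER is the one that
 * brings a new adapter into existence, via HPIMSGX__init().
 */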
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
		HPI_DEBUG_LOG(WARNING,
			"suspicious adapter index %d in subsys message 0x%x.\n",
			phm->adapter_index, phm->function);

	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
		phr->u.s.data = HPI_VER;	/* return major.minor.release */
		break;
	case HPI_SUBSYS_OPEN:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		/* Initialize this module's internal state */
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
		hpios_locked_mem_init();
		/* Init subsys_findadapters response to no-adapters */
		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);
		/* individual HPIs don't implement driver load */
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpios_locked_mem_free_all();
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		return;

	case HPI_SUBSYS_GET_NUM_ADAPTERS:
	case HPI_SUBSYS_GET_ADAPTER:
		HPI_COMMON(phm, phr);
		break;

	case HPI_SUBSYS_CREATE_ADAPTER:
		HPIMSGX__init(phm, phr);
		break;

	default:
		/* Must explicitly handle every subsys message in this switch */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
			HPI_ERROR_INVALID_FUNC);
		break;
	}
}

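/* Handle adapter-level messages.  Open and close are satisfied from the
 * cached responses; HPI_ADAPTER_DELETE first releases any streams still
 * owned by the caller, sends a real HPI_ADAPTER_CLOSE to the hardware
 * handler, and then forwards the delete itself.  Everything else goes
 * straight to the adapter's entry point.
 */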
static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_ADAPTER_OPEN:
		adapter_open(phm, phr);
		break;
	case HPI_ADAPTER_CLOSE:
		adapter_close(phm, phr);
		break;
	case HPI_ADAPTER_DELETE:
		HPIMSGX__cleanup(phm->adapter_index, h_owner);
		{
			struct hpi_message hm;
			struct hpi_response hr;
			hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_CLOSE);
			hm.adapter_index = phm->adapter_index;
			hw_entry_point(&hm, &hr);
		}
		hw_entry_point(phm, phr);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
{
	switch (phm->function) {
	case HPI_MIXER_OPEN:
		mixer_open(phm, phr);
		break;
	case HPI_MIXER_CLOSE:
		mixer_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

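/* Stream-level messages are first bounds-checked against the stream counts
 * cached in aDAPTER_INFO when the adapter was prepared, so a bad obj_index
 * is rejected here rather than being passed to the hardware handler.
 */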
static void outstream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
		hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_OSTREAM_OPEN:
		outstream_open(phm, phr, h_owner);
		break;
	case HPI_OSTREAM_CLOSE:
		outstream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void instream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
		hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_ISTREAM_OPEN:
		instream_open(phm, phr, h_owner);
		break;
	case HPI_ISTREAM_CLOSE:
		instream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

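/* hpi_send_recv_ex() is the single entry point into this layer: it
 * validates the message type and adapter index, then dispatches on the
 * object type to the handlers above.  A caller (sketch only; in this
 * driver the message is normally built with the hpi_init_message*
 * helpers from hpimsginit.h, and h_owner identifies the opening file
 * handle) might look like:
 *
 *	struct hpi_message hm;
 *	struct hpi_response hr;
 *
 *	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
 *	hm.adapter_index = 0;
 *	hpi_send_recv_ex(&hm, &hr, h_owner);
 *	if (hr.error)
 *		... handle error ...
 */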
/* NOTE: HPI_Message() must be defined in the driver as a wrapper for
 * HPI_MessageEx so that functions in hpifunc.c compile.
 */
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	HPI_DEBUG_MESSAGE(DEBUG, phm);

	if (phm->type != HPI_TYPE_MESSAGE) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_INVALID_TYPE);
		return;
	}

	if (phm->adapter_index >= HPI_MAX_ADAPTERS
		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_BAD_ADAPTER_NUMBER);
		return;
	}

	switch (phm->object) {
	case HPI_OBJ_SUBSYSTEM:
		subsys_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ADAPTER:
		adapter_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_MIXER:
		mixer_message(phm, phr);
		break;

	case HPI_OBJ_OSTREAM:
		outstream_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ISTREAM:
		instream_message(phm, phr, h_owner);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
	HPI_DEBUG_RESPONSE(phr);
}

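/* The open/close helpers below never touch the hardware for the open
 * itself: adapter_open() and mixer_open() simply return the response
 * cached by adapter_prepare(), and the corresponding close calls just
 * report success.
 */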
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}

static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_MIXER_OPEN[0]));
}

static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}

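/* Stream opens are tracked per (adapter, stream) in *_user_open[][].  An
 * open fails if another owner already holds the stream or if the cached
 * open response carries an error; otherwise the stream is claimed, reset
 * via the hardware handler (with msgx_lock dropped around that call), and
 * the cached open response is returned to the caller.
 */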
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
	else {
		instream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);
	if (h_owner ==
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "instream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d instream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
	else {
		outstream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (h_owner ==
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "outstream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d outstream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

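/* Called once after an adapter is created: open the adapter, query its
 * stream counts, pre-open every outstream, instream and the mixer, and
 * cache the responses so later open requests can be answered from this
 * layer without touching the hardware again.
 */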
static u16 adapter_prepare(u16 adapter)
{
	struct hpi_message hm;
	struct hpi_response hr;

	/* Open the adapter and streams */
	u16 i;

	/* call to HPI_ADAPTER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
	if (hr.error)
		return hr.error;

	/* call to HPI_ADAPTER_GET_INFO */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_INFO);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	if (hr.error)
		return hr.error;

	aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
	aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
	aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;

	/* call to HPI_OSTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		outstream_user_open[adapter][i].open_flag = 0;
		outstream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_ISTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		instream_user_open[adapter][i].open_flag = 0;
		instream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_MIXER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_MIXER_OPEN[0]));

	return 0;
}

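/* Mark the cached responses for one adapter (or all of them, when called
 * with HPIMSGX_ALLADAPTERS) as errors, so opens fail until the adapter is
 * (re)created and adapter_prepare() refreshes the cache.
 */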
static void HPIMSGX__reset(u16 adapter_index)
{
	int i;
	u16 adapter;
	struct hpi_response hr;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {

			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));

			hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
				HPI_ERROR_INVALID_OBJ);
			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_MIXER_OPEN[adapter]));

			for (i = 0; i < HPI_MAX_STREAMS; i++) {
				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
					HPI_OSTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
						[i]));
				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
					HPI_ISTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
						[i]));
			}
		}
	} else if (adapter_index < HPI_MAX_ADAPTERS) {
		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
			HPI_ERROR_BAD_ADAPTER;
		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
			HPI_ERROR_INVALID_OBJ;
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
		}
	}
}

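/* Create an adapter in response to HPI_SUBSYS_CREATE_ADAPTER: look up the
 * handler for the PCI device, let it create the adapter, record the handler
 * in hpi_entry_points[] under the returned adapter index, and then pre-open
 * the adapter's objects via adapter_prepare().
 */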
static u16 HPIMSGX__init(struct hpi_message *phm,
	/* HPI_SUBSYS_CREATE_ADAPTER structure with */
	/* resource list or NULL=find all */
	struct hpi_response *phr
	/* response from HPI_ADAPTER_GET_INFO */
	)
{
	hpi_handler_func *entry_point_func;
	struct hpi_response hr;

	/* Init response here so we can pass in previous adapter list */
	hpi_init_response(&hr, phm->object, phm->function,
		HPI_ERROR_INVALID_OBJ);

	entry_point_func =
		hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);

	if (entry_point_func) {
		HPI_DEBUG_MESSAGE(DEBUG, phm);
		entry_point_func(phm, &hr);
	} else {
		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
		return phr->error;
	}
	if (hr.error == 0) {
		/* the adapter was created successfully
		   save the mapping for future use */
		hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
		/* prepare adapter (pre-open streams etc.) */
		HPI_DEBUG_LOG(DEBUG,
			"HPI_SUBSYS_CREATE_ADAPTER successful,"
			" preparing adapter\n");
		adapter_prepare(hr.u.s.adapter_index);
	}
	memcpy(phr, &hr, hr.size);
	return phr->error;
}

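/* Release everything a particular owner still holds on one adapter (or on
 * all adapters, for HPIMSGX_ALLADAPTERS): each stream it left open is
 * reset, its host buffer freed and its stream group reset through the
 * hardware handler, and the ownership record is cleared.
 */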
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
{
	int i, adapter, adapter_limit;

	if (!h_owner)
		return;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		adapter = 0;
		adapter_limit = HPI_MAX_ADAPTERS;
	} else {
		adapter = adapter_index;
		adapter_limit = adapter + 1;
	}

	for (; adapter < adapter_limit; adapter++) {
		/* printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			if (h_owner ==
				outstream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d ostream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				outstream_user_open[adapter][i].open_flag = 0;
				outstream_user_open[adapter][i].h_owner =
					NULL;
			}
			if (h_owner == instream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d istream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				instream_user_open[adapter][i].open_flag = 0;
				instream_user_open[adapter][i].h_owner = NULL;