/******************************************************************************

    AudioScience HPI driver
    Copyright (C) 1997-2011  AudioScience Inc. <support@audioscience.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of version 2 of the GNU General Public License as
    published by the Free Software Foundation;

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

 Extended Message Function With Response Caching

(C) Copyright AudioScience Inc. 2002
*****************************************************************************/
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpi_version.h"
#include "hpimsginit.h"
#include "hpicmn.h"
#include "hpimsgx.h"
#include "hpidebug.h"
static struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};

static struct hpios_spinlock msgx_lock;

static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
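/* Look up the HPI entry point function for an adapter's PCI identity.
 * The driver_data field of each asihpi_pci_tbl entry (generated from
 * hpipcida.h) holds the handler for that adapter family; entries using
 * PCI_ANY_ID act as wildcards for the corresponding ID field.
 */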
static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
	*pci_info)
{

	int i;

	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].vendor !=
			pci_info->pci_dev->vendor)
			continue;
		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
			&& asihpi_pci_tbl[i].device !=
			pci_info->pci_dev->device)
			continue;
		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subvendor !=
			pci_info->pci_dev->subsystem_vendor)
			continue;
		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subdevice !=
			pci_info->pci_dev->subsystem_device)
			continue;

		/* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
		   asihpi_pci_tbl[i].driver_data); */
		return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
	}

	return NULL;
}
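/* Forward a message to the entry point registered for its adapter index,
 * or fail the response with HPI_ERROR_PROCESSING_MESSAGE if the index is
 * out of range or no adapter has been created at that index yet.
 */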
static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{
	if ((phm->adapter_index < HPI_MAX_ADAPTERS)
		&& hpi_entry_points[phm->adapter_index])
		hpi_entry_points[phm->adapter_index] (phm, phr);
	else
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);
}

static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);

static void HPIMSGX__reset(u16 adapter_index);

static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

struct hpi_subsys_response {
	struct hpi_response_header h;
	struct hpi_subsys_res s;
};

struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

struct adapter_info {
	u16 type;
	u16 num_instreams;
	u16 num_outstreams;
};

struct asi_open_state {
	int open_flag;
	void *h_owner;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif
/* Globals */
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

/* use these to keep track of opens from user mode apps/DLLs */
static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
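/* Response caching: adapter_prepare() opens each adapter, mixer and stream
 * once at adapter-creation time and stores the responses in the rESP_*
 * arrays above.  Later open requests from user mode are answered from this
 * cache, while the *_user_open tables record which owner currently holds
 * each stream so HPIMSGX__cleanup() can release them when that owner goes
 * away.
 */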
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
		HPI_DEBUG_LOG(WARNING,
			"suspicious adapter index %d in subsys message 0x%x.\n",
			phm->adapter_index, phm->function);

	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
		phr->u.s.data = HPI_VER;	/* return major.minor.release */
		break;
	case HPI_SUBSYS_OPEN:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		/* Initialize this module's internal state */
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
		/* Init subsys_findadapters response to no-adapters */
		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);
		/* individual HPIs don't implement driver load */
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		return;

	case HPI_SUBSYS_GET_NUM_ADAPTERS:
	case HPI_SUBSYS_GET_ADAPTER:
		HPI_COMMON(phm, phr);
		break;

	case HPI_SUBSYS_CREATE_ADAPTER:
		HPIMSGX__init(phm, phr);
		break;

	default:
		/* Must explicitly handle every subsys message in this switch */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
			HPI_ERROR_INVALID_FUNC);
		break;
	}
}
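/* HPI_ADAPTER_OPEN is answered from the cached open response and
 * HPI_ADAPTER_CLOSE is acknowledged without touching the hardware.
 * HPI_ADAPTER_DELETE first releases any streams still held by this owner,
 * sends an explicit HPI_ADAPTER_CLOSE to the hardware, then forwards the
 * delete itself.
 */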
static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_ADAPTER_OPEN:
		adapter_open(phm, phr);
		break;
	case HPI_ADAPTER_CLOSE:
		adapter_close(phm, phr);
		break;
	case HPI_ADAPTER_DELETE:
		HPIMSGX__cleanup(phm->adapter_index, h_owner);
		{
			struct hpi_message hm;
			struct hpi_response hr;
			hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_CLOSE);
			hm.adapter_index = phm->adapter_index;
			hw_entry_point(&hm, &hr);
		}
		hw_entry_point(phm, phr);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
}
static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
{
	switch (phm->function) {
	case HPI_MIXER_OPEN:
		mixer_open(phm, phr);
		break;
	case HPI_MIXER_CLOSE:
		mixer_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}
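/* Stream messages are range-checked against the stream counts reported by
 * HPI_ADAPTER_GET_INFO (cached in aDAPTER_INFO) before being dispatched.
 */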
static void outstream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
		hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_OSTREAM_OPEN:
		outstream_open(phm, phr, h_owner);
		break;
	case HPI_OSTREAM_CLOSE:
		outstream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}
static void instream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
		hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_ISTREAM_OPEN:
		instream_open(phm, phr, h_owner);
		break;
	case HPI_ISTREAM_CLOSE:
		instream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}
/* NOTE: HPI_Message() must be defined in the driver as a wrapper for
 * HPI_MessageEx so that functions in hpifunc.c compile.
 */
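/* For illustration only, such a wrapper could look like the sketch below.
 * This is an assumption about the OS adaptation layer, not code in this
 * file: the owner token name (HOWNER_KERNEL) and where the wrapper lives
 * are defined elsewhere in the driver and may differ.
 *
 *	void hpi_send_recv(struct hpi_message *phm, struct hpi_response *phr)
 *	{
 *		hpi_send_recv_ex(phm, phr, HOWNER_KERNEL);
 *	}
 */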
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	HPI_DEBUG_MESSAGE(DEBUG, phm);

	if (phm->type != HPI_TYPE_REQUEST) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_INVALID_TYPE);
		return;
	}

	if (phm->adapter_index >= HPI_MAX_ADAPTERS
		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_BAD_ADAPTER_NUMBER);
		return;
	}

	switch (phm->object) {
	case HPI_OBJ_SUBSYSTEM:
		subsys_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ADAPTER:
		adapter_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_MIXER:
		mixer_message(phm, phr);
		break;

	case HPI_OBJ_OSTREAM:
		outstream_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ISTREAM:
		instream_message(phm, phr, h_owner);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
	HPI_DEBUG_RESPONSE(phr);
}
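/* Adapter and mixer open requests are answered from the cached responses
 * captured in adapter_prepare(); the matching close requests simply return
 * success.  Nothing is sent to the hardware for any of these four.
 */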
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}

static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_MIXER_OPEN[0]));
}

static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}
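/* Stream open: claim the stream for h_owner under msgx_lock, drop the lock
 * while a stream reset is sent to the hardware, then either record the new
 * owner and return the cached open response, or roll the claim back if the
 * reset failed.  Stream close: only the recorded owner may close; closing
 * clears the ownership record and resets the stream.
 */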
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
			[phm->obj_index],
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
	else {
		instream_user_open[phm->adapter_index]
			[phm->obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 1;
			instream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);
	if (h_owner ==
		instream_user_open[phm->adapter_index]
		[phm->obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO, "closing adapter %d "
		   "instream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		instream_user_open[phm->adapter_index]
			[phm->obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 0;
			instream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d instream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			instream_user_open[phm->adapter_index]
			[phm->obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
			[phm->obj_index],
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
	else {
		outstream_user_open[phm->adapter_index]
			[phm->obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 1;
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (h_owner ==
		outstream_user_open[phm->adapter_index]
		[phm->obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO, "closing adapter %d "
		   "outstream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		outstream_user_open[phm->adapter_index]
			[phm->obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 0;
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d outstream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			outstream_user_open[phm->adapter_index]
			[phm->obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
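/* Populate the response cache for a newly created adapter: open the
 * adapter, query its stream counts, open every out/in stream and the
 * mixer once, and store each response for later replay.
 */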
static u16 adapter_prepare(u16 adapter)
{
	struct hpi_message hm;
	struct hpi_response hr;

	/* Open the adapter and streams */
	u16 i;

	/* call to HPI_ADAPTER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
	if (hr.error)
		return hr.error;

	/* call to HPI_ADAPTER_GET_INFO */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_INFO);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	if (hr.error)
		return hr.error;

	aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
	aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
	aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;

	/* call to HPI_OSTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		outstream_user_open[adapter][i].open_flag = 0;
		outstream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_ISTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		instream_user_open[adapter][i].open_flag = 0;
		instream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_MIXER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_MIXER_OPEN[0]));

	return 0;
}
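/* Mark one adapter (or all of them for HPIMSGX_ALLADAPTERS) as absent by
 * loading the cached open responses with error codes.
 */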
static void HPIMSGX__reset(u16 adapter_index)
{
	int i;
	u16 adapter;
	struct hpi_response hr;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {

			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));

			hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
				HPI_ERROR_INVALID_OBJ);
			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_MIXER_OPEN[adapter]));

			for (i = 0; i < HPI_MAX_STREAMS; i++) {
				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
					HPI_OSTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
						[i]));
				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
					HPI_ISTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
						[i]));
			}
		}
	} else if (adapter_index < HPI_MAX_ADAPTERS) {
		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
			HPI_ERROR_BAD_ADAPTER;
		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
			HPI_ERROR_INVALID_OBJ;
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
		}
	}
}
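/* Handle HPI_SUBSYS_CREATE_ADAPTER: find the entry point matching the
 * supplied PCI resource, let it create the adapter, then record the entry
 * point and pre-open the adapter's objects via adapter_prepare().
 */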
static u16 HPIMSGX__init(struct hpi_message *phm,
	/* HPI_SUBSYS_CREATE_ADAPTER structure with */
	/* resource list or NULL=find all */
	struct hpi_response *phr
	/* response from HPI_ADAPTER_GET_INFO */
	)
{
	hpi_handler_func *entry_point_func;
	struct hpi_response hr;

	/* Init response here so we can pass in previous adapter list */
	hpi_init_response(&hr, phm->object, phm->function,
		HPI_ERROR_INVALID_OBJ);

	entry_point_func =
		hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);

	if (entry_point_func) {
		HPI_DEBUG_MESSAGE(DEBUG, phm);
		entry_point_func(phm, &hr);
	} else {
		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
		return phr->error;
	}
	if (hr.error == 0) {
		/* the adapter was created successfully
		   save the mapping for future use */
		hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
		/* prepare adapter (pre-open streams etc.) */
		HPI_DEBUG_LOG(DEBUG,
			"HPI_SUBSYS_CREATE_ADAPTER successful,"
			" preparing adapter\n");
		adapter_prepare(hr.u.s.adapter_index);
	}
	memcpy(phr, &hr, hr.size);
	return phr->error;
}
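/* Release every stream still owned by h_owner on the given adapter (or on
 * all adapters for HPIMSGX_ALLADAPTERS): reset the stream, free any host
 * buffer, reset its group, and clear the ownership record.
 */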
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
{
	int i, adapter, adapter_limit;

	if (!h_owner)
		return;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		adapter = 0;
		adapter_limit = HPI_MAX_ADAPTERS;
	} else {
		adapter = adapter_index;
		adapter_limit = adapter + 1;
	}

	for (; adapter < adapter_limit; adapter++) {
		/* printk(KERN_INFO "Cleanup adapter #%d\n", wAdapter); */
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			if (h_owner ==
				outstream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d ostream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				outstream_user_open[adapter][i].open_flag = 0;
				outstream_user_open[adapter][i].h_owner =
					NULL;
			}
			if (h_owner == instream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d istream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				instream_user_open[adapter][i].open_flag = 0;
				instream_user_open[adapter][i].h_owner = NULL;
			}
		}
	}
}