/******************************************************************************

    AudioScience HPI driver
    Copyright (C) 1997-2010  AudioScience Inc. <support@audioscience.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of version 2 of the GNU General Public License as
    published by the Free Software Foundation;

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

 Extended Message Function With Response Caching

 (C) Copyright AudioScience Inc. 2002
*****************************************************************************/
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpimsginit.h"
#include "hpicmn.h"
#include "hpimsgx.h"
#include "hpidebug.h"
static struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};

static struct hpios_spinlock msgx_lock;

static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
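/* Look up the HPI message handler for a PCI device by matching its
 * vendor/device/subsystem IDs against asihpi_pci_tbl; each matching table
 * entry carries the adapter family's hpi_handler_func in driver_data.
 */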
static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
	*pci_info)
{
	int i;

	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].vendor !=
			pci_info->pci_dev->vendor)
			continue;
		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
			&& asihpi_pci_tbl[i].device !=
			pci_info->pci_dev->device)
			continue;
		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subvendor !=
			pci_info->pci_dev->subsystem_vendor)
			continue;
		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subdevice !=
			pci_info->pci_dev->subsystem_device)
			continue;

		/* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
			asihpi_pci_tbl[i].driver_data); */
		return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
	}

	return NULL;
}
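/* Dispatch a message to the entry point registered for its adapter index,
 * or fail the response with HPI_ERROR_PROCESSING_MESSAGE if no handler is
 * registered at that index.
 */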
static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{
	if ((phm->adapter_index < HPI_MAX_ADAPTERS)
		&& hpi_entry_points[phm->adapter_index])
		hpi_entry_points[phm->adapter_index] (phm, phr);
	else
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);
}
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);

static void HPIMSGX__reset(u16 adapter_index);

static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

struct hpi_subsys_response {
	struct hpi_response_header h;
	struct hpi_subsys_res s;
};

struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

struct adapter_info {
	u16 type;
	u16 num_instreams;
	u16 num_outstreams;
};

struct asi_open_state {
	int open_flag;
	void *h_owner;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif
/* Globals */
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

/* use these to keep track of opens from user mode apps/DLLs */
static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
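/* Handle HPI_OBJ_SUBSYSTEM messages locally: version queries, subsystem
 * open/close, driver load/unload and adapter creation are answered or
 * routed here rather than forwarded to an adapter entry point.
 */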
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
		HPI_DEBUG_LOG(WARNING,
			"suspicious adapter index %d in subsys message 0x%x.\n",
			phm->adapter_index, phm->function);

	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
		phr->u.s.data = HPI_VER;	/* return major.minor.release */
		break;
	case HPI_SUBSYS_OPEN:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		/* Initialize this module's internal state */
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
		/* Init subsys_findadapters response to no-adapters */
		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);
		/* individual HPIs don't implement driver load */
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		return;

	case HPI_SUBSYS_GET_NUM_ADAPTERS:
	case HPI_SUBSYS_GET_ADAPTER:
		HPI_COMMON(phm, phr);
		break;

	case HPI_SUBSYS_CREATE_ADAPTER:
		HPIMSGX__init(phm, phr);
		break;

	default:
		/* Must explicitly handle every subsys message in this switch */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
			HPI_ERROR_INVALID_FUNC);
		break;
	}
}
static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_ADAPTER_OPEN:
		adapter_open(phm, phr);
		break;
	case HPI_ADAPTER_CLOSE:
		adapter_close(phm, phr);
		break;
	case HPI_ADAPTER_DELETE:
		HPIMSGX__cleanup(phm->adapter_index, h_owner);
		{
			struct hpi_message hm;
			struct hpi_response hr;
			hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_CLOSE);
			hm.adapter_index = phm->adapter_index;
			hw_entry_point(&hm, &hr);
		}
		hw_entry_point(phm, phr);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
}
static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
{
	switch (phm->function) {
	case HPI_MIXER_OPEN:
		mixer_open(phm, phr);
		break;
	case HPI_MIXER_CLOSE:
		mixer_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}
static void outstream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
		hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_OSTREAM_OPEN:
		outstream_open(phm, phr, h_owner);
		break;
	case HPI_OSTREAM_CLOSE:
		outstream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}
static void instream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
		hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_ISTREAM_OPEN:
		instream_open(phm, phr, h_owner);
		break;
	case HPI_ISTREAM_CLOSE:
		instream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}
/* NOTE: HPI_Message() must be defined in the driver as a wrapper for
 * HPI_MessageEx so that functions in hpifunc.c compile.
 */
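/* A minimal sketch of such a wrapper, for illustration only. The real
 * definition lives elsewhere in the driver; the hpi_send_recv name and the
 * HOWNER_KERNEL owner token used below are assumptions, not taken from
 * this file:
 *
 *	void hpi_send_recv(struct hpi_message *phm, struct hpi_response *phr)
 *	{
 *		hpi_send_recv_ex(phm, phr, HOWNER_KERNEL);
 *	}
 */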
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	HPI_DEBUG_MESSAGE(DEBUG, phm);

	if (phm->type != HPI_TYPE_REQUEST) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_INVALID_TYPE);
		return;
	}

	if (phm->adapter_index >= HPI_MAX_ADAPTERS
		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_BAD_ADAPTER_NUMBER);
		return;
	}

	switch (phm->object) {
	case HPI_OBJ_SUBSYSTEM:
		subsys_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ADAPTER:
		adapter_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_MIXER:
		mixer_message(phm, phr);
		break;

	case HPI_OBJ_OSTREAM:
		outstream_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ISTREAM:
		instream_message(phm, phr, h_owner);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
	HPI_DEBUG_RESPONSE(phr);
}
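/* The open/close handlers below answer from the responses cached by
 * adapter_prepare() rather than re-sending an open to the hardware on
 * every request.
 */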
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}

static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_MIXER_OPEN[0]));
}

static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}
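/* Stream open protocol: under msgx_lock, reject if the stream is already
 * open or its cached open response holds an error; otherwise mark it open,
 * drop the lock while resetting the stream via the hardware entry point,
 * then record the owner and return the cached open response.
 */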
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
	else {
		instream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);
	if (h_owner ==
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "instream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d instream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
	else {
		outstream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (h_owner ==
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "outstream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d outstream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
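/* Open the adapter, query its stream counts, then pre-open every stream
 * and the mixer, caching each response for later adapter/mixer/stream
 * opens.
 */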
static u16 adapter_prepare(u16 adapter)
{
	struct hpi_message hm;
	struct hpi_response hr;

	/* Open the adapter and streams */
	u16 i;

	/* call to HPI_ADAPTER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
	if (hr.error)
		return hr.error;

	/* call to HPI_ADAPTER_GET_INFO */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_INFO);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	if (hr.error)
		return hr.error;

	aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
	aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
	aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;

	/* call to HPI_OSTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		outstream_user_open[adapter][i].open_flag = 0;
		outstream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_ISTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		instream_user_open[adapter][i].open_flag = 0;
		instream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_MIXER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_MIXER_OPEN[0]));

	return 0;
}
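/* Reset the cached open responses for one adapter (or all adapters) to
 * error values so that opens fail until the adapter is (re)created.
 */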
static void HPIMSGX__reset(u16 adapter_index)
{
	int i;
	u16 adapter;
	struct hpi_response hr;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {

			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));

			hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
				HPI_ERROR_INVALID_OBJ);
			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_MIXER_OPEN[adapter]));

			for (i = 0; i < HPI_MAX_STREAMS; i++) {
				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
					HPI_OSTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
						[i]));
				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
					HPI_ISTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
						[i]));
			}
		}
	} else if (adapter_index < HPI_MAX_ADAPTERS) {
		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
			HPI_ERROR_BAD_ADAPTER;
		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
			HPI_ERROR_INVALID_OBJ;
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
		}
	}
}
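/* Create an adapter from an HPI_SUBSYS_CREATE_ADAPTER message: find the
 * matching entry point, let it create the adapter, record the entry point
 * under the new adapter index and pre-open its objects via
 * adapter_prepare().
 */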
static u16 HPIMSGX__init(struct hpi_message *phm,
	/* HPI_SUBSYS_CREATE_ADAPTER structure with */
	/* resource list or NULL=find all */
	struct hpi_response *phr
	/* response from HPI_ADAPTER_GET_INFO */
	)
{
	hpi_handler_func *entry_point_func;
	struct hpi_response hr;

	/* Init response here so we can pass in previous adapter list */
	hpi_init_response(&hr, phm->object, phm->function,
		HPI_ERROR_INVALID_OBJ);

	entry_point_func =
		hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);

	if (entry_point_func) {
		HPI_DEBUG_MESSAGE(DEBUG, phm);
		entry_point_func(phm, &hr);
	} else {
		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
		return phr->error;
	}
	if (hr.error == 0) {
		/* the adapter was created successfully
		   save the mapping for future use */
		hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
		/* prepare adapter (pre-open streams etc.) */
		HPI_DEBUG_LOG(DEBUG,
			"HPI_SUBSYS_CREATE_ADAPTER successful,"
			" preparing adapter\n");
		adapter_prepare(hr.u.s.adapter_index);
	}
	memcpy(phr, &hr, hr.size);
	return phr->error;
}
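/* Release everything a departing owner still has open: for each adapter in
 * range, reset and free any out/in streams whose recorded h_owner matches,
 * then clear the open bookkeeping for those streams.
 */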
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
{
	int i, adapter, adapter_limit;

	if (!h_owner)
		return;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		adapter = 0;
		adapter_limit = HPI_MAX_ADAPTERS;
	} else {
		adapter = adapter_index;
		adapter_limit = adapter + 1;
	}

	for (; adapter < adapter_limit; adapter++) {
		/* printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			if (h_owner ==
				outstream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d ostream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				outstream_user_open[adapter][i].open_flag = 0;
				outstream_user_open[adapter][i].h_owner =
					NULL;
			}
			if (h_owner == instream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d istream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				instream_user_open[adapter][i].open_flag = 0;
				instream_user_open[adapter][i].h_owner = NULL;
			}
		}
	}
}