/******************************************************************************

    AudioScience HPI driver
    Copyright (C) 1997-2014  AudioScience Inc. <support@audioscience.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of version 2 of the GNU General Public License as
    published by the Free Software Foundation;

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

 Extended Message Function With Response Caching

(C) Copyright AudioScience Inc. 2002
*****************************************************************************/
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpi_version.h"
#include "hpimsginit.h"
#include "hpicmn.h"
#include "hpimsgx.h"
#include "hpidebug.h"
static struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};

static struct hpios_spinlock msgx_lock;

static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
static int logging_enabled = 1;
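
/* Map a PCI device to the HPI handler function stored in the driver_data
 * field of the matching asihpi_pci_tbl[] entry; returns NULL when no entry
 * in the device table matches the adapter's PCI IDs.
 */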
static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
	*pci_info)
{
	int i;

	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].vendor !=
			pci_info->pci_dev->vendor)
			continue;
		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
			&& asihpi_pci_tbl[i].device !=
			pci_info->pci_dev->device)
			continue;
		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subvendor !=
			pci_info->pci_dev->subsystem_vendor)
			continue;
		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subdevice !=
			pci_info->pci_dev->subsystem_device)
			continue;

		/* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
		   asihpi_pci_tbl[i].driver_data); */
		return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
	}
	return NULL;
}
static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{
	if ((phm->adapter_index < HPI_MAX_ADAPTERS)
		&& hpi_entry_points[phm->adapter_index])
		hpi_entry_points[phm->adapter_index] (phm, phr);
	else
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);
}
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);

static void HPIMSGX__reset(u16 adapter_index);

static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

struct hpi_subsys_response {
	struct hpi_response_header h;
	struct hpi_subsys_res s;
};

struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

struct adapter_info {
	u16 type;
	u16 num_instreams;
	u16 num_outstreams;
};

struct asi_open_state {
	int open_flag;
	void *h_owner;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif
/* Globals */
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

/* use these to keep track of opens from user mode apps/DLLs */
static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
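
/* Response caching: adapter_prepare() pre-opens the adapter, mixer and
 * every stream once and stores the responses in the rESP_HPI_*_OPEN
 * arrays above, so later open requests can be answered from this cache,
 * while the *_user_open arrays record which owner currently holds each
 * stream open.
 */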
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
		HPI_DEBUG_LOG(WARNING,
			"suspicious adapter index %d in subsys message 0x%x.\n",
			phm->adapter_index, phm->function);

	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
		phr->u.s.data = HPI_VER;	/* return major.minor.release */
		break;
	case HPI_SUBSYS_OPEN:
		/*do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		/*do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		/* Initialize this module's internal state */
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
		/* Init subsys_findadapters response to no-adapters */
		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);
		/* individual HPIs don't implement driver load */
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		return;

	case HPI_SUBSYS_GET_NUM_ADAPTERS:
	case HPI_SUBSYS_GET_ADAPTER:
		HPI_COMMON(phm, phr);
		break;

	case HPI_SUBSYS_CREATE_ADAPTER:
		HPIMSGX__init(phm, phr);
		break;

	default:
		/* Must explicitly handle every subsys message in this switch */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
			HPI_ERROR_INVALID_FUNC);
		break;
	}
}
static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_ADAPTER_OPEN:
		adapter_open(phm, phr);
		break;
	case HPI_ADAPTER_CLOSE:
		adapter_close(phm, phr);
		break;
	case HPI_ADAPTER_DELETE:
		HPIMSGX__cleanup(phm->adapter_index, h_owner);
		{
			struct hpi_message hm;
			struct hpi_response hr;
			hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_CLOSE);
			hm.adapter_index = phm->adapter_index;
			hw_entry_point(&hm, &hr);
		}
		hw_entry_point(phm, phr);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
}
static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
{
	switch (phm->function) {
	case HPI_MIXER_OPEN:
		mixer_open(phm, phr);
		break;
	case HPI_MIXER_CLOSE:
		mixer_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}
static void outstream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
		hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_OSTREAM_OPEN:
		outstream_open(phm, phr, h_owner);
		break;
	case HPI_OSTREAM_CLOSE:
		outstream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}
static void instream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
		hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_ISTREAM_OPEN:
		instream_open(phm, phr, h_owner);
		break;
	case HPI_ISTREAM_CLOSE:
		instream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}
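
/* hpi_send_recv_ex() is the single entry point for this layer: it
 * validates the message type and adapter index, dispatches by object
 * type to the handlers above, and disables further logging once a
 * DSP communication error has been reported.
 */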
/* NOTE: HPI_Message() must be defined in the driver as a wrapper for
 * HPI_MessageEx so that functions in hpifunc.c compile.
 */
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	if (logging_enabled)
		HPI_DEBUG_MESSAGE(DEBUG, phm);

	if (phm->type != HPI_TYPE_REQUEST) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_INVALID_TYPE);
		return;
	}

	if (phm->adapter_index >= HPI_MAX_ADAPTERS
		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_BAD_ADAPTER_NUMBER);
		return;
	}

	switch (phm->object) {
	case HPI_OBJ_SUBSYSTEM:
		subsys_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ADAPTER:
		adapter_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_MIXER:
		mixer_message(phm, phr);
		break;

	case HPI_OBJ_OSTREAM:
		outstream_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ISTREAM:
		instream_message(phm, phr, h_owner);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}

	if (logging_enabled)
		HPI_DEBUG_RESPONSE(phr);

	if (phr->error >= HPI_ERROR_DSP_COMMUNICATION) {
		hpi_debug_level_set(HPI_DEBUG_LEVEL_ERROR);
		logging_enabled = 0;
	}
}
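
/* Illustrative sketch only (not part of the original source): a caller is
 * expected to build a message/response pair and then invoke the entry
 * point above, roughly as follows:
 *
 *	struct hpi_message hm;
 *	struct hpi_response hr;
 *
 *	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
 *	hm.adapter_index = 0;
 *	hpi_send_recv_ex(&hm, &hr, h_owner);
 *	if (hr.error)
 *		... handle the error ...
 */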
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}
static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_MIXER_OPEN[0]));
}
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
	else {
		instream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);
	if (h_owner ==
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "instream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d instream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
	else {
		outstream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (h_owner ==
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "outstream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d outstream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
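
/* Open the adapter, query its stream counts, pre-open every stream and
 * the mixer, and cache the open responses for reuse by the open handlers
 * above.
 */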
static u16 adapter_prepare(u16 adapter)
{
	struct hpi_message hm;
	struct hpi_response hr;

	/* Open the adapter and streams */
	u16 i;

	/* call to HPI_ADAPTER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
	if (hr.error)
		return hr.error;

	/* call to HPI_ADAPTER_GET_INFO */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_INFO);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	if (hr.error)
		return hr.error;

	aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
	aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
	aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;

	/* call to HPI_OSTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		outstream_user_open[adapter][i].open_flag = 0;
		outstream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_ISTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		instream_user_open[adapter][i].open_flag = 0;
		instream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_MIXER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_MIXER_OPEN[0]));

	return 0;
}
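
/* Fill the cached open responses with error codes so that opens fail
 * until the corresponding adapter has been created and prepared.
 */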
static void HPIMSGX__reset(u16 adapter_index)
{
	int i;
	u16 adapter;
	struct hpi_response hr;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {

			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));

			hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
				HPI_ERROR_INVALID_OBJ);
			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_MIXER_OPEN[adapter]));

			for (i = 0; i < HPI_MAX_STREAMS; i++) {
				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
					HPI_OSTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
						[i]));
				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
					HPI_ISTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
						[i]));
			}
		}
	} else if (adapter_index < HPI_MAX_ADAPTERS) {
		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
			HPI_ERROR_BAD_ADAPTER;
		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
			HPI_ERROR_INVALID_OBJ;
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
		}
	}
}
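
/* Handle HPI_SUBSYS_CREATE_ADAPTER: look up the entry point matching the
 * adapter's PCI IDs, create the adapter through it, record the entry
 * point for later dispatch, then pre-open its objects via adapter_prepare().
 */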
static u16 HPIMSGX__init(struct hpi_message *phm,
	/* HPI_SUBSYS_CREATE_ADAPTER structure with */
	/* resource list or NULL=find all */
	struct hpi_response *phr
	/* response from HPI_ADAPTER_GET_INFO */
	)
{
	hpi_handler_func *entry_point_func;
	struct hpi_response hr;

	/* Init response here so we can pass in previous adapter list */
	hpi_init_response(&hr, phm->object, phm->function,
		HPI_ERROR_INVALID_OBJ);

	entry_point_func =
		hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);

	if (entry_point_func) {
		HPI_DEBUG_MESSAGE(DEBUG, phm);
		entry_point_func(phm, &hr);
	} else {
		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
		return phr->error;
	}
	if (hr.error == 0) {
		/* the adapter was created successfully
		   save the mapping for future use */
		hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
		/* prepare adapter (pre-open streams etc.) */
		HPI_DEBUG_LOG(DEBUG,
			"HPI_SUBSYS_CREATE_ADAPTER successful,"
			" preparing adapter\n");
		adapter_prepare(hr.u.s.adapter_index);
	}
	memcpy(phr, &hr, hr.size);
	return phr->error;
}
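
/* Release everything still held by h_owner: reset each owned stream,
 * free its host buffer and reset its group, then clear the ownership
 * records.
 */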
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
{
	int i, adapter, adapter_limit;

	if (!h_owner)
		return;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		adapter = 0;
		adapter_limit = HPI_MAX_ADAPTERS;
	} else {
		adapter = adapter_index;
		adapter_limit = adapter + 1;
	}

	for (; adapter < adapter_limit; adapter++) {
		/* printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			if (h_owner ==
				outstream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d ostream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				outstream_user_open[adapter][i].open_flag = 0;
				outstream_user_open[adapter][i].h_owner =
					NULL;
			}
			if (h_owner == instream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d istream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				instream_user_open[adapter][i].open_flag = 0;
				instream_user_open[adapter][i].h_owner = NULL;
			}
		}
	}
}