[linux-ginger.git] sound/core/seq/seq_instr.c
/*
 *   Generic Instrument routines for ALSA sequencer
 *   Copyright (c) 1999 by Jaroslav Kysela <perex@suse.cz>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */
#include <sound/driver.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>
#include "seq_clientmgr.h"
#include <sound/seq_instr.h>
#include <sound/initval.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@suse.cz>");
MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer instrument library.");
MODULE_LICENSE("GPL");
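
/*
 * This library maintains instrument lists for the kernel sequencer,
 * used e.g. by wavetable synth drivers.  As a rough sketch (the names
 * driver_ops and ilist are placeholders, not taken from any real
 * driver), a user of the library creates a list, routes sequencer
 * instrument events into it and destroys it on unload:
 *
 *	snd_seq_kinstr_list_t *ilist = snd_seq_instr_list_new();
 *	...
 *	// in the driver's sequencer event handler:
 *	snd_seq_instr_event(driver_ops, ilist, ev, client, atomic, hop);
 *	...
 *	snd_seq_instr_list_free(&ilist);
 *
 * The two helpers below serialize operations on such a list: lists
 * without SNDRV_SEQ_INSTR_FLG_DIRECT are protected by a spinlock,
 * direct lists by a semaphore.
 */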
static void snd_instr_lock_ops(snd_seq_kinstr_list_t *list)
{
	if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
		spin_lock_irqsave(&list->ops_lock, list->ops_flags);
	} else {
		down(&list->ops_mutex);
	}
}

static void snd_instr_unlock_ops(snd_seq_kinstr_list_t *list)
{
	if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
		spin_unlock_irqrestore(&list->ops_lock, list->ops_flags);
	} else {
		up(&list->ops_mutex);
	}
}
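
/*
 * Allocate a single instrument record; 'add_len' extra bytes are
 * reserved behind the header for the format-specific instrument data.
 */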
static snd_seq_kinstr_t *snd_seq_instr_new(int add_len, int atomic)
{
	snd_seq_kinstr_t *instr;

	instr = kcalloc(1, sizeof(snd_seq_kinstr_t) + add_len, atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (instr == NULL)
		return NULL;
	instr->add_len = add_len;
	return instr;
}

static int snd_seq_instr_free(snd_seq_kinstr_t *instr, int atomic)
{
	int result = 0;

	if (instr == NULL)
		return -EINVAL;
	if (instr->ops && instr->ops->remove)
		result = instr->ops->remove(instr->ops->private_data, instr, 1);
	if (!result)
		kfree(instr);
	return result;
}

snd_seq_kinstr_list_t *snd_seq_instr_list_new(void)
{
	snd_seq_kinstr_list_t *list;

	list = kcalloc(1, sizeof(snd_seq_kinstr_list_t), GFP_KERNEL);
	if (list == NULL)
		return NULL;
	spin_lock_init(&list->lock);
	spin_lock_init(&list->ops_lock);
	init_MUTEX(&list->ops_mutex);
	list->owner = -1;
	return list;
}
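
/*
 * Destroy a whole instrument list: every hashed instrument and cluster
 * is released.  If an instrument is still in use, wait (one tick at a
 * time) until its use count drops to zero.
 */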
void snd_seq_instr_list_free(snd_seq_kinstr_list_t **list_ptr)
{
	snd_seq_kinstr_list_t *list;
	snd_seq_kinstr_t *instr;
	snd_seq_kcluster_t *cluster;
	int idx;
	unsigned long flags;

	if (list_ptr == NULL)
		return;
	list = *list_ptr;
	*list_ptr = NULL;
	if (list == NULL)
		return;

	for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
		while ((instr = list->hash[idx]) != NULL) {
			list->hash[idx] = instr->next;
			list->count--;
			spin_lock_irqsave(&list->lock, flags);
			while (instr->use) {
				spin_unlock_irqrestore(&list->lock, flags);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule_timeout(1);
				spin_lock_irqsave(&list->lock, flags);
			}
			spin_unlock_irqrestore(&list->lock, flags);
			if (snd_seq_instr_free(instr, 0) < 0)
				snd_printk(KERN_WARNING "instrument free problem\n");
		}
		while ((cluster = list->chash[idx]) != NULL) {
			list->chash[idx] = cluster->next;
			list->ccount--;
			kfree(cluster);
		}
	}
	kfree(list);
}
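
/*
 * Decide whether 'instr' is covered by the given free command.
 * Returns 0 when the instrument should be removed, nonzero when it
 * should be kept (e.g. a private instrument of another client).
 */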
static int instr_free_compare(snd_seq_kinstr_t *instr,
			      snd_seq_instr_header_t *ifree,
			      unsigned int client)
{
	switch (ifree->cmd) {
	case SNDRV_SEQ_INSTR_FREE_CMD_ALL:
		/* all, except private for other clients */
		if ((instr->instr.std & 0xff000000) == 0)
			return 0;
		if (((instr->instr.std >> 24) & 0xff) == client)
			return 0;
		return 1;
	case SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE:
		/* all my private instruments */
		if ((instr->instr.std & 0xff000000) == 0)
			return 1;
		if (((instr->instr.std >> 24) & 0xff) == client)
			return 0;
		return 1;
	case SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER:
		/* all instruments in the given cluster (public or my private ones) */
		if ((instr->instr.std & 0xff000000) == 0) {
			if (instr->instr.cluster == ifree->id.cluster)
				return 0;
			return 1;
		}
		if (((instr->instr.std >> 24) & 0xff) == client) {
			if (instr->instr.cluster == ifree->id.cluster)
				return 0;
		}
		return 1;
	}
	return 1;
}
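
/*
 * Conditional bulk free: unlink every instrument matched by 'ifree'
 * from the hash table under the list lock, then release the unlinked
 * instruments outside of it, waiting for their use counts to drop.
 */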
int snd_seq_instr_list_free_cond(snd_seq_kinstr_list_t *list,
				 snd_seq_instr_header_t *ifree,
				 int client,
				 int atomic)
{
	snd_seq_kinstr_t *instr, *prev, *next, *flist;
	int idx;
	unsigned long flags;

	snd_instr_lock_ops(list);
	for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
		spin_lock_irqsave(&list->lock, flags);
		instr = list->hash[idx];
		prev = flist = NULL;
		while (instr) {
			while (instr && instr_free_compare(instr, ifree, (unsigned int)client)) {
				prev = instr;
				instr = instr->next;
			}
			if (instr == NULL)
				continue;
			if (instr->ops && instr->ops->notify)
				instr->ops->notify(instr->ops->private_data, instr, SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
			next = instr->next;
			if (prev == NULL) {
				list->hash[idx] = next;
			} else {
				prev->next = next;
			}
			list->count--;
			instr->next = flist;
			flist = instr;
			instr = next;
		}
		spin_unlock_irqrestore(&list->lock, flags);
		while (flist) {
			instr = flist;
			flist = instr->next;
			while (instr->use) {
				set_current_state(TASK_INTERRUPTIBLE);
				schedule_timeout(1);
			}
			if (snd_seq_instr_free(instr, atomic) < 0)
				snd_printk(KERN_WARNING "instrument free problem\n");
			instr = next;
		}
	}
	snd_instr_unlock_ops(list);
	return 0;
}
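
/*
 * Hash key for the instrument table, folded from the bank and program
 * numbers down to SNDRV_SEQ_INSTR_HASH_SIZE buckets.
 */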
static int compute_hash_instr_key(snd_seq_instr_t *instr)
{
	int result;

	result = instr->bank | (instr->prg << 16);
	result += result >> 24;
	result += result >> 16;
	result += result >> 8;
	return result & (SNDRV_SEQ_INSTR_HASH_SIZE-1);
}

#if 0
static int compute_hash_cluster_key(snd_seq_instr_cluster_t cluster)
{
	int result;

	result = cluster;
	result += result >> 24;
	result += result >> 16;
	result += result >> 8;
	return result & (SNDRV_SEQ_INSTR_HASH_SIZE-1);
}
#endif
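
/*
 * Compare two instrument identifiers; returns 0 on a match.  In exact
 * mode the cluster, bank, program and owning client must all agree and
 * at least one interface (std) bit must overlap; in non-exact mode the
 * cluster and client checks are applied only when the template 'i2'
 * specifies them.
 */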
static int compare_instr(snd_seq_instr_t *i1, snd_seq_instr_t *i2, int exact)
{
	if (exact) {
		if (i1->cluster != i2->cluster ||
		    i1->bank != i2->bank ||
		    i1->prg != i2->prg)
			return 1;
		if ((i1->std & 0xff000000) != (i2->std & 0xff000000))
			return 1;
		if (!(i1->std & i2->std))
			return 1;
		return 0;
	} else {
		unsigned int client_check;

		if (i2->cluster && i1->cluster != i2->cluster)
			return 1;
		client_check = i2->std & 0xff000000;
		if (client_check) {
			if ((i1->std & 0xff000000) != client_check)
				return 1;
		} else {
			if ((i1->std & i2->std) != i2->std)
				return 1;
		}
		return i1->bank != i2->bank || i1->prg != i2->prg;
	}
}
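
/*
 * Look up an instrument.  Aliases are followed up to 10 levels deep
 * when 'follow_alias' is set.  On success the instrument's use count
 * is incremented; the caller must drop it again with
 * snd_seq_instr_free_use().
 */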
snd_seq_kinstr_t *snd_seq_instr_find(snd_seq_kinstr_list_t *list,
				     snd_seq_instr_t *instr,
				     int exact,
				     int follow_alias)
{
	unsigned long flags;
	int depth = 0;
	snd_seq_kinstr_t *result;

	if (list == NULL || instr == NULL)
		return NULL;
	spin_lock_irqsave(&list->lock, flags);
      __again:
	result = list->hash[compute_hash_instr_key(instr)];
	while (result) {
		if (!compare_instr(&result->instr, instr, exact)) {
			if (follow_alias && (result->type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)) {
				instr = (snd_seq_instr_t *)KINSTR_DATA(result);
				if (++depth > 10)
					goto __not_found;
				goto __again;
			}
			result->use++;
			spin_unlock_irqrestore(&list->lock, flags);
			return result;
		}
		result = result->next;
	}
      __not_found:
	spin_unlock_irqrestore(&list->lock, flags);
	return NULL;
}
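
/*
 * Drop a use reference taken by snd_seq_instr_find().
 */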
void snd_seq_instr_free_use(snd_seq_kinstr_list_t *list,
			    snd_seq_kinstr_t *instr)
{
	unsigned long flags;

	if (list == NULL || instr == NULL)
		return;
	spin_lock_irqsave(&list->lock, flags);
	if (instr->use <= 0) {
		snd_printk(KERN_ERR "free_use: fatal!!! use = %i, name = '%s'\n", instr->use, instr->name);
	} else {
		instr->use--;
	}
	spin_unlock_irqrestore(&list->lock, flags);
}

static snd_seq_kinstr_ops_t *instr_ops(snd_seq_kinstr_ops_t *ops, char *instr_type)
{
	while (ops) {
		if (!strcmp(ops->instr_type, instr_type))
			return ops;
		ops = ops->next;
	}
	return NULL;
}

static int instr_result(snd_seq_event_t *ev,
			int type, int result,
			int atomic)
{
	snd_seq_event_t sev;

	memset(&sev, 0, sizeof(sev));
	sev.type = SNDRV_SEQ_EVENT_RESULT;
	sev.flags = SNDRV_SEQ_TIME_STAMP_REAL | SNDRV_SEQ_EVENT_LENGTH_FIXED |
		    SNDRV_SEQ_PRIORITY_NORMAL;
	sev.source = ev->dest;
	sev.dest = ev->source;
	sev.data.result.event = type;
	sev.data.result.result = result;
#if 0
	printk("instr result - type = %i, result = %i, queue = %i, source.client:port = %i:%i, dest.client:port = %i:%i\n",
	       type, result,
	       sev.queue,
	       sev.source.client, sev.source.port,
	       sev.dest.client, sev.dest.port);
#endif
	return snd_seq_kernel_client_dispatch(sev.source.client, &sev, atomic, 0);
}
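
/*
 * INSTR_BEGIN/INSTR_END claim and release exclusive ownership of the
 * list for one client; the outcome is reported back to the sender as
 * a SNDRV_SEQ_EVENT_RESULT event.
 */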
static int instr_begin(snd_seq_kinstr_ops_t *ops,
		       snd_seq_kinstr_list_t *list,
		       snd_seq_event_t *ev,
		       int atomic, int hop)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	if (list->owner >= 0 && list->owner != ev->source.client) {
		spin_unlock_irqrestore(&list->lock, flags);
		return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, -EBUSY, atomic);
	}
	list->owner = ev->source.client;
	spin_unlock_irqrestore(&list->lock, flags);
	return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, 0, atomic);
}

static int instr_end(snd_seq_kinstr_ops_t *ops,
		     snd_seq_kinstr_list_t *list,
		     snd_seq_event_t *ev,
		     int atomic, int hop)
{
	unsigned long flags;

	/* TODO: timeout handling */
	spin_lock_irqsave(&list->lock, flags);
	if (list->owner == ev->source.client) {
		list->owner = -1;
		spin_unlock_irqrestore(&list->lock, flags);
		return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, 0, atomic);
	}
	spin_unlock_irqrestore(&list->lock, flags);
	return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, -EINVAL, atomic);
}
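
/* The info, format-info, reset and status queries are not implemented. */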
static int instr_info(snd_seq_kinstr_ops_t *ops,
		      snd_seq_kinstr_list_t *list,
		      snd_seq_event_t *ev,
		      int atomic, int hop)
{
	return -ENXIO;
}

static int instr_format_info(snd_seq_kinstr_ops_t *ops,
			     snd_seq_kinstr_list_t *list,
			     snd_seq_event_t *ev,
			     int atomic, int hop)
{
	return -ENXIO;
}

static int instr_reset(snd_seq_kinstr_ops_t *ops,
		       snd_seq_kinstr_list_t *list,
		       snd_seq_event_t *ev,
		       int atomic, int hop)
{
	return -ENXIO;
}

static int instr_status(snd_seq_kinstr_ops_t *ops,
			snd_seq_kinstr_list_t *list,
			snd_seq_event_t *ev,
			int atomic, int hop)
{
	return -ENXIO;
}
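
/*
 * INSTR_PUT: copy the instrument header from user space, resolve the
 * matching ops by format name, allocate the instrument (an alias only
 * stores a snd_seq_instr_t) and hash it into the list.  Fails with
 * -EBUSY if the instrument already exists.
 */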
static int instr_put(snd_seq_kinstr_ops_t *ops,
		     snd_seq_kinstr_list_t *list,
		     snd_seq_event_t *ev,
		     int atomic, int hop)
{
	unsigned long flags;
	snd_seq_instr_header_t put;
	snd_seq_kinstr_t *instr;
	int result = -EINVAL, len, key;

	if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
		goto __return;

	if (ev->data.ext.len < sizeof(snd_seq_instr_header_t))
		goto __return;
	if (copy_from_user(&put, (void __user *)ev->data.ext.ptr, sizeof(snd_seq_instr_header_t))) {
		result = -EFAULT;
		goto __return;
	}
	snd_instr_lock_ops(list);
	if (put.id.instr.std & 0xff000000) {	/* private instrument */
		put.id.instr.std &= 0x00ffffff;
		put.id.instr.std |= (unsigned int)ev->source.client << 24;
	}
	if ((instr = snd_seq_instr_find(list, &put.id.instr, 1, 0))) {
		snd_seq_instr_free_use(list, instr);
		snd_instr_unlock_ops(list);
		result = -EBUSY;
		goto __return;
	}
	ops = instr_ops(ops, put.data.data.format);
	if (ops == NULL) {
		snd_instr_unlock_ops(list);
		goto __return;
	}
	len = ops->add_len;
	if (put.data.type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)
		len = sizeof(snd_seq_instr_t);
	instr = snd_seq_instr_new(len, atomic);
	if (instr == NULL) {
		snd_instr_unlock_ops(list);
		result = -ENOMEM;
		goto __return;
	}
	instr->ops = ops;
	instr->instr = put.id.instr;
	strlcpy(instr->name, put.data.name, sizeof(instr->name));
	instr->type = put.data.type;
	if (instr->type == SNDRV_SEQ_INSTR_ATYPE_DATA) {
		result = ops->put(ops->private_data,
				  instr,
				  (void __user *)ev->data.ext.ptr + sizeof(snd_seq_instr_header_t),
				  ev->data.ext.len - sizeof(snd_seq_instr_header_t),
				  atomic,
				  put.cmd);
		if (result < 0) {
			snd_seq_instr_free(instr, atomic);
			snd_instr_unlock_ops(list);
			goto __return;
		}
	}
	key = compute_hash_instr_key(&instr->instr);
	spin_lock_irqsave(&list->lock, flags);
	instr->next = list->hash[key];
	list->hash[key] = instr;
	list->count++;
	spin_unlock_irqrestore(&list->lock, flags);
	snd_instr_unlock_ops(list);
	result = 0;
      __return:
	instr_result(ev, SNDRV_SEQ_EVENT_INSTR_PUT, result, atomic);
	return result;
}

static int instr_get(snd_seq_kinstr_ops_t *ops,
		     snd_seq_kinstr_list_t *list,
		     snd_seq_event_t *ev,
		     int atomic, int hop)
{
	return -ENXIO;
}
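
/*
 * INSTR_FREE: remove a single instrument (CMD_SINGLE) or a whole group
 * of instruments (CMD_ALL / CMD_PRIVATE / CMD_CLUSTER), reporting the
 * result back to the requester.
 */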
static int instr_free(snd_seq_kinstr_ops_t *ops,
		      snd_seq_kinstr_list_t *list,
		      snd_seq_event_t *ev,
		      int atomic, int hop)
{
	snd_seq_instr_header_t ifree;
	snd_seq_kinstr_t *instr, *prev;
	int result = -EINVAL;
	unsigned long flags;
	unsigned int hash;

	if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
		goto __return;

	if (ev->data.ext.len < sizeof(snd_seq_instr_header_t))
		goto __return;
	if (copy_from_user(&ifree, (void __user *)ev->data.ext.ptr, sizeof(snd_seq_instr_header_t))) {
		result = -EFAULT;
		goto __return;
	}
	if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_ALL ||
	    ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE ||
	    ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER) {
		result = snd_seq_instr_list_free_cond(list, &ifree, ev->dest.client, atomic);
		goto __return;
	}
	if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_SINGLE) {
		if (ifree.id.instr.std & 0xff000000) {
			ifree.id.instr.std &= 0x00ffffff;
			ifree.id.instr.std |= (unsigned int)ev->source.client << 24;
		}
		hash = compute_hash_instr_key(&ifree.id.instr);
		snd_instr_lock_ops(list);
		spin_lock_irqsave(&list->lock, flags);
		instr = list->hash[hash];
		prev = NULL;
		while (instr) {
			if (!compare_instr(&instr->instr, &ifree.id.instr, 1))
				goto __free_single;
			prev = instr;
			instr = instr->next;
		}
		result = -ENOENT;
		spin_unlock_irqrestore(&list->lock, flags);
		snd_instr_unlock_ops(list);
		goto __return;

	      __free_single:
		if (prev) {
			prev->next = instr->next;
		} else {
			list->hash[hash] = instr->next;
		}
		if (instr->ops && instr->ops->notify)
			instr->ops->notify(instr->ops->private_data, instr, SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
		while (instr->use) {
			spin_unlock_irqrestore(&list->lock, flags);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);
			spin_lock_irqsave(&list->lock, flags);
		}
		spin_unlock_irqrestore(&list->lock, flags);
		result = snd_seq_instr_free(instr, atomic);
		snd_instr_unlock_ops(list);
		goto __return;
	}
      __return:
	instr_result(ev, SNDRV_SEQ_EVENT_INSTR_FREE, result, atomic);
	return result;
}

static int instr_list(snd_seq_kinstr_ops_t *ops,
		      snd_seq_kinstr_list_t *list,
		      snd_seq_event_t *ev,
		      int atomic, int hop)
{
	return -ENXIO;
}

static int instr_cluster(snd_seq_kinstr_ops_t *ops,
			 snd_seq_kinstr_list_t *list,
			 snd_seq_event_t *ev,
			 int atomic, int hop)
{
	return -ENXIO;
}
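
/*
 * Main entry point: dispatch an incoming sequencer instrument event to
 * the handlers above.  BEGIN/END are accepted only as direct events;
 * lists flagged SNDRV_SEQ_INSTR_FLG_DIRECT reject any other event that
 * did not arrive directly.
 */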
int snd_seq_instr_event(snd_seq_kinstr_ops_t *ops,
			snd_seq_kinstr_list_t *list,
			snd_seq_event_t *ev,
			int client,
			int atomic,
			int hop)
{
	int direct = 0;

	snd_assert(ops != NULL && list != NULL && ev != NULL, return -EINVAL);
	if (snd_seq_ev_is_direct(ev)) {
		direct = 1;
		switch (ev->type) {
		case SNDRV_SEQ_EVENT_INSTR_BEGIN:
			return instr_begin(ops, list, ev, atomic, hop);
		case SNDRV_SEQ_EVENT_INSTR_END:
			return instr_end(ops, list, ev, atomic, hop);
		}
	}
	if ((list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT) && !direct)
		return -EINVAL;
	switch (ev->type) {
	case SNDRV_SEQ_EVENT_INSTR_INFO:
		return instr_info(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_FINFO:
		return instr_format_info(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_RESET:
		return instr_reset(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_STATUS:
		return instr_status(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_PUT:
		return instr_put(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_GET:
		return instr_get(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_FREE:
		return instr_free(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_LIST:
		return instr_list(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_CLUSTER:
		return instr_cluster(ops, list, ev, atomic, hop);
	}
	return -EINVAL;
}

/*
 *  Init part
 */

static int __init alsa_seq_instr_init(void)
{
	return 0;
}

static void __exit alsa_seq_instr_exit(void)
{
}

module_init(alsa_seq_instr_init)
module_exit(alsa_seq_instr_exit)

EXPORT_SYMBOL(snd_seq_instr_list_new);
EXPORT_SYMBOL(snd_seq_instr_list_free);
EXPORT_SYMBOL(snd_seq_instr_list_free_cond);
EXPORT_SYMBOL(snd_seq_instr_find);
EXPORT_SYMBOL(snd_seq_instr_free_use);
EXPORT_SYMBOL(snd_seq_instr_event);