/*
 *   Generic Instrument routines for ALSA sequencer
 *   Copyright (c) 1999 by Jaroslav Kysela <perex@perex.cz>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */
#include <sound/driver.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>
#include "seq_clientmgr.h"
#include <sound/seq_instr.h>
#include <sound/initval.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer instrument library.");
MODULE_LICENSE("GPL");
static void snd_instr_lock_ops(struct snd_seq_kinstr_list *list)
{
	if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
		spin_lock_irqsave(&list->ops_lock, list->ops_flags);
	} else {
		mutex_lock(&list->ops_mutex);
	}
}
static void snd_instr_unlock_ops(struct snd_seq_kinstr_list *list)
{
	if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
		spin_unlock_irqrestore(&list->ops_lock, list->ops_flags);
	} else {
		mutex_unlock(&list->ops_mutex);
	}
}
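/*
 * Usage sketch (illustration only, not part of this file): callers that
 * touch the instrument list wrap the critical section in the helpers above,
 * so the right primitive is taken automatically - the spinlock when the
 * list is not flagged SNDRV_SEQ_INSTR_FLG_DIRECT, the mutex when it is:
 *
 *	snd_instr_lock_ops(list);
 *	... walk or modify list->hash[] ...
 *	snd_instr_unlock_ops(list);
 */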
static struct snd_seq_kinstr *snd_seq_instr_new(int add_len, int atomic)
{
	struct snd_seq_kinstr *instr;

	instr = kzalloc(sizeof(struct snd_seq_kinstr) + add_len,
			atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (instr == NULL)
		return NULL;
	instr->add_len = add_len;
	return instr;
}
static int snd_seq_instr_free(struct snd_seq_kinstr *instr, int atomic)
{
	int result = 0;

	if (instr->ops && instr->ops->remove)
		result = instr->ops->remove(instr->ops->private_data, instr, 1);
	if (!result)
		kfree(instr);
	return result;
}
struct snd_seq_kinstr_list *snd_seq_instr_list_new(void)
{
	struct snd_seq_kinstr_list *list;

	list = kzalloc(sizeof(struct snd_seq_kinstr_list), GFP_KERNEL);
	if (list == NULL)
		return NULL;
	spin_lock_init(&list->lock);
	spin_lock_init(&list->ops_lock);
	mutex_init(&list->ops_mutex);
	list->owner = -1;
	return list;
}
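/*
 * Usage sketch (illustration only; synth and ilist are hypothetical driver
 * fields): a synth driver typically allocates one instrument list at probe
 * time and tears it down on removal:
 *
 *	synth->ilist = snd_seq_instr_list_new();
 *	if (synth->ilist == NULL)
 *		return -ENOMEM;
 *	...
 *	snd_seq_instr_list_free(&synth->ilist);
 */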
void snd_seq_instr_list_free(struct snd_seq_kinstr_list **list_ptr)
{
	struct snd_seq_kinstr_list *list;
	struct snd_seq_kinstr *instr;
	struct snd_seq_kcluster *cluster;
	int idx;
	unsigned long flags;

	if (list_ptr == NULL)
		return;
	list = *list_ptr;
	*list_ptr = NULL;
	if (list == NULL)
		return;

	for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
		while ((instr = list->hash[idx]) != NULL) {
			list->hash[idx] = instr->next;
			/* wait until the instrument is no longer in use */
			spin_lock_irqsave(&list->lock, flags);
			while (instr->use) {
				spin_unlock_irqrestore(&list->lock, flags);
				schedule_timeout_uninterruptible(1);
				spin_lock_irqsave(&list->lock, flags);
			}
			spin_unlock_irqrestore(&list->lock, flags);
			if (snd_seq_instr_free(instr, 0) < 0)
				snd_printk(KERN_WARNING "instrument free problem\n");
		}
		while ((cluster = list->chash[idx]) != NULL) {
			list->chash[idx] = cluster->next;
			kfree(cluster);
		}
	}
	kfree(list);
}
/* return non-zero if the instrument matches the free request */
static int instr_free_compare(struct snd_seq_kinstr *instr,
			      struct snd_seq_instr_header *ifree,
			      unsigned int client)
{
	switch (ifree->cmd) {
	case SNDRV_SEQ_INSTR_FREE_CMD_ALL:
		/* all, except private for other clients */
		if ((instr->instr.std & 0xff000000) == 0)
			return 1;
		if (((instr->instr.std >> 24) & 0xff) == client)
			return 1;
		return 0;
	case SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE:
		/* all my private instruments */
		if ((instr->instr.std & 0xff000000) == 0)
			return 0;
		if (((instr->instr.std >> 24) & 0xff) == client)
			return 1;
		return 0;
	case SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER:
		/* all my instruments in the given cluster */
		if ((instr->instr.std & 0xff000000) == 0) {
			if (instr->instr.cluster == ifree->id.cluster)
				return 1;
			return 0;
		}
		if (((instr->instr.std >> 24) & 0xff) == client) {
			if (instr->instr.cluster == ifree->id.cluster)
				return 1;
			return 0;
		}
		return 0;
	}
	return 0;
}
int snd_seq_instr_list_free_cond(struct snd_seq_kinstr_list *list,
				 struct snd_seq_instr_header *ifree,
				 int client, int atomic)
{
	struct snd_seq_kinstr *instr, *prev, *next, *flist;
	int idx;
	unsigned long flags;

	snd_instr_lock_ops(list);
	for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
		spin_lock_irqsave(&list->lock, flags);
		instr = list->hash[idx];
		prev = flist = NULL;
		while (instr) {
			while (instr && instr_free_compare(instr, ifree, (unsigned int)client)) {
				if (instr->ops && instr->ops->notify)
					instr->ops->notify(instr->ops->private_data, instr,
							   SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
				next = instr->next;
				if (prev == NULL)
					list->hash[idx] = next;
				else
					prev->next = next;
				/* move the instrument to a temporary free list */
				instr->next = flist;
				flist = instr;
				instr = next;
			}
			if (instr) {
				prev = instr;
				instr = instr->next;
			}
		}
		spin_unlock_irqrestore(&list->lock, flags);
		/* free the collected instruments outside the list lock */
		while (flist) {
			instr = flist;
			flist = instr->next;
			while (instr->use)
				schedule_timeout_uninterruptible(1);
			if (snd_seq_instr_free(instr, atomic) < 0)
				snd_printk(KERN_WARNING "instrument free problem\n");
		}
	}
	snd_instr_unlock_ops(list);
	return 0;
}
static int compute_hash_instr_key(struct snd_seq_instr *instr)
{
	unsigned int result;

	result = instr->bank | (instr->prg << 16);
	result += result >> 24;
	result += result >> 16;
	result += result >> 8;
	return result & (SNDRV_SEQ_INSTR_HASH_SIZE-1);
}
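/*
 * Worked example (illustration only; assumes SNDRV_SEQ_INSTR_HASH_SIZE is a
 * power of two, e.g. 32): for bank = 0, prg = 5 the key folds as
 *
 *	result  = 0 | (5 << 16)       = 0x50000
 *	result += result >> 24        = 0x50000
 *	result += result >> 16        = 0x50005
 *	result += result >> 8         = 0x50505
 *	return 0x50505 & 31           = 5
 *
 * so the instrument lands in hash bucket 5.
 */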
static int compute_hash_cluster_key(snd_seq_instr_cluster_t cluster)
{
	unsigned int result;

	result = cluster;
	result += result >> 24;
	result += result >> 16;
	result += result >> 8;
	return result & (SNDRV_SEQ_INSTR_HASH_SIZE-1);
}
/* return 0 if the two instrument identifiers match */
static int compare_instr(struct snd_seq_instr *i1, struct snd_seq_instr *i2, int exact)
{
	if (exact) {
		if (i1->cluster != i2->cluster ||
		    i1->bank != i2->bank ||
		    i1->prg != i2->prg)
			return 1;
		if ((i1->std & 0xff000000) != (i2->std & 0xff000000))
			return 1;
		if (!(i1->std & i2->std))
			return 1;
		return 0;
	} else {
		unsigned int client_check;

		if (i2->cluster && i1->cluster != i2->cluster)
			return 1;
		client_check = i2->std & 0xff000000;
		if (client_check) {
			if ((i1->std & 0xff000000) != client_check)
				return 1;
		} else {
			if ((i1->std & i2->std) != i2->std)
				return 1;
		}
		return i1->bank != i2->bank || i1->prg != i2->prg;
	}
}
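/*
 * Matching sketch (illustration only): exact matching (exact != 0) requires
 * cluster, bank, prg and the private-owner byte to agree, while loose
 * matching lets a request with cluster == 0 match any cluster and only
 * demands that all std bits of the request are present in the candidate:
 *
 *	struct snd_seq_instr want;
 *	memset(&want, 0, sizeof(want));
 *	want.bank = 0;
 *	want.prg  = 5;
 *	want.std  = 1 << 0;                          hypothetical standard bit
 *	instr = snd_seq_instr_find(list, &want, 0, 1);   loose, follow aliases
 */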
struct snd_seq_kinstr *snd_seq_instr_find(struct snd_seq_kinstr_list *list,
					   struct snd_seq_instr *instr,
					   int exact,
					   int follow_alias)
{
	unsigned long flags;
	int depth = 0;
	struct snd_seq_kinstr *result;

	if (list == NULL || instr == NULL)
		return NULL;
	spin_lock_irqsave(&list->lock, flags);
      __again:
	result = list->hash[compute_hash_instr_key(instr)];
	while (result) {
		if (!compare_instr(&result->instr, instr, exact)) {
			if (follow_alias && (result->type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)) {
				/* restart the lookup with the alias target */
				instr = (struct snd_seq_instr *)KINSTR_DATA(result);
				if (++depth > 10)
					goto __not_found;
				goto __again;
			}
			result->use++;
			spin_unlock_irqrestore(&list->lock, flags);
			return result;
		}
		result = result->next;
	}
      __not_found:
	spin_unlock_irqrestore(&list->lock, flags);
	return NULL;
}
void snd_seq_instr_free_use(struct snd_seq_kinstr_list *list,
			    struct snd_seq_kinstr *instr)
{
	unsigned long flags;

	if (list == NULL || instr == NULL)
		return;
	spin_lock_irqsave(&list->lock, flags);
	if (instr->use <= 0) {
		snd_printk(KERN_ERR "free_use: fatal!!! use = %i, name = '%s'\n",
			   instr->use, instr->name);
	} else {
		instr->use--;
	}
	spin_unlock_irqrestore(&list->lock, flags);
}
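/*
 * Reference-counting sketch (illustration only): snd_seq_instr_find() bumps
 * instr->use before returning, so every successful lookup must be balanced
 * by snd_seq_instr_free_use() once the caller is done with the data:
 *
 *	instr = snd_seq_instr_find(list, &id, 1, 1);
 *	if (instr) {
 *		... use KINSTR_DATA(instr) ...
 *		snd_seq_instr_free_use(list, instr);
 *	}
 */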
static struct snd_seq_kinstr_ops *instr_ops(struct snd_seq_kinstr_ops *ops,
					    char *instr_type)
{
	while (ops) {
		if (!strcmp(ops->instr_type, instr_type))
			return ops;
		ops = ops->next;
	}
	return NULL;
}
static int instr_result(struct snd_seq_event *ev,
			int type, int result,
			int atomic)
{
	struct snd_seq_event sev;

	memset(&sev, 0, sizeof(sev));
	sev.type = SNDRV_SEQ_EVENT_RESULT;
	sev.flags = SNDRV_SEQ_TIME_STAMP_REAL | SNDRV_SEQ_EVENT_LENGTH_FIXED |
		    SNDRV_SEQ_PRIORITY_NORMAL;
	sev.source = ev->dest;
	sev.dest = ev->source;
	sev.data.result.event = type;
	sev.data.result.result = result;
#if 0
	printk("instr result - type = %i, result = %i, queue = %i, source.client:port = %i:%i, dest.client:port = %i:%i\n",
	       type, result, sev.queue,
	       sev.source.client, sev.source.port,
	       sev.dest.client, sev.dest.port);
#endif
	return snd_seq_kernel_client_dispatch(sev.source.client, &sev, atomic, 0);
}
static int instr_begin(struct snd_seq_kinstr_ops *ops,
		       struct snd_seq_kinstr_list *list,
		       struct snd_seq_event *ev,
		       int atomic, int hop)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	if (list->owner >= 0 && list->owner != ev->source.client) {
		spin_unlock_irqrestore(&list->lock, flags);
		return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, -EBUSY, atomic);
	}
	list->owner = ev->source.client;
	spin_unlock_irqrestore(&list->lock, flags);
	return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, 0, atomic);
}
static int instr_end(struct snd_seq_kinstr_ops *ops,
		     struct snd_seq_kinstr_list *list,
		     struct snd_seq_event *ev,
		     int atomic, int hop)
{
	unsigned long flags;

	/* TODO: timeout handling */
	spin_lock_irqsave(&list->lock, flags);
	if (list->owner == ev->source.client) {
		list->owner = -1;
		spin_unlock_irqrestore(&list->lock, flags);
		return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, 0, atomic);
	}
	spin_unlock_irqrestore(&list->lock, flags);
	return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, -EINVAL, atomic);
}
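/*
 * Ownership sketch (illustration only): a client that wants exclusive access
 * while downloading a set of instruments brackets the transfer with
 * INSTR_BEGIN and INSTR_END events; while it owns the list, other clients
 * get a RESULT event carrying -EBUSY:
 *
 *	client A -> INSTR_BEGIN   (owner = A, result 0)
 *	client B -> INSTR_BEGIN   (result -EBUSY)
 *	client A -> INSTR_PUT ... (accepted)
 *	client A -> INSTR_END     (owner released, result 0)
 */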
static int instr_info(struct snd_seq_kinstr_ops *ops,
		      struct snd_seq_kinstr_list *list,
		      struct snd_seq_event *ev,
		      int atomic, int hop)
{
	return -ENXIO;
}
static int instr_format_info(struct snd_seq_kinstr_ops *ops,
			     struct snd_seq_kinstr_list *list,
			     struct snd_seq_event *ev,
			     int atomic, int hop)
{
	return -ENXIO;
}
static int instr_reset(struct snd_seq_kinstr_ops *ops,
		       struct snd_seq_kinstr_list *list,
		       struct snd_seq_event *ev,
		       int atomic, int hop)
{
	return -ENXIO;
}
static int instr_status(struct snd_seq_kinstr_ops *ops,
			struct snd_seq_kinstr_list *list,
			struct snd_seq_event *ev,
			int atomic, int hop)
{
	return -ENXIO;
}
static int instr_put(struct snd_seq_kinstr_ops *ops,
		     struct snd_seq_kinstr_list *list,
		     struct snd_seq_event *ev,
		     int atomic, int hop)
{
	unsigned long flags;
	struct snd_seq_instr_header put;
	struct snd_seq_kinstr *instr;
	int result = -EINVAL, len, key;

	if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
		goto __return;

	if (ev->data.ext.len < sizeof(struct snd_seq_instr_header))
		goto __return;
	if (copy_from_user(&put, (void __user *)ev->data.ext.ptr,
			   sizeof(struct snd_seq_instr_header))) {
		result = -EFAULT;
		goto __return;
	}
	snd_instr_lock_ops(list);
	if (put.id.instr.std & 0xff000000) {	/* private instrument */
		put.id.instr.std &= 0x00ffffff;
		put.id.instr.std |= (unsigned int)ev->source.client << 24;
	}
	if ((instr = snd_seq_instr_find(list, &put.id.instr, 1, 0))) {
		snd_seq_instr_free_use(list, instr);
		snd_instr_unlock_ops(list);
		result = -EBUSY;
		goto __return;
	}
	ops = instr_ops(ops, put.data.data.format);
	if (ops == NULL) {
		snd_instr_unlock_ops(list);
		goto __return;
	}
	len = ops->add_len;
	if (put.data.type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)
		len = sizeof(struct snd_seq_instr);
	instr = snd_seq_instr_new(len, atomic);
	if (instr == NULL) {
		snd_instr_unlock_ops(list);
		result = -ENOMEM;
		goto __return;
	}
	instr->ops = ops;
	instr->instr = put.id.instr;
	strlcpy(instr->name, put.data.name, sizeof(instr->name));
	instr->type = put.data.type;
	if (instr->type == SNDRV_SEQ_INSTR_ATYPE_DATA) {
		result = ops->put(ops->private_data, instr,
				  (void __user *)ev->data.ext.ptr + sizeof(struct snd_seq_instr_header),
				  ev->data.ext.len - sizeof(struct snd_seq_instr_header),
				  atomic, put.cmd);
		if (result < 0) {
			snd_seq_instr_free(instr, atomic);
			snd_instr_unlock_ops(list);
			goto __return;
		}
	}
	key = compute_hash_instr_key(&instr->instr);
	spin_lock_irqsave(&list->lock, flags);
	instr->next = list->hash[key];
	list->hash[key] = instr;
	spin_unlock_irqrestore(&list->lock, flags);
	snd_instr_unlock_ops(list);
	result = 0;
      __return:
	instr_result(ev, SNDRV_SEQ_EVENT_INSTR_PUT, result, atomic);
	return result;
}
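/*
 * Payload sketch (illustration only): an INSTR_PUT event is a VARUSR event
 * whose external data starts with a struct snd_seq_instr_header naming the
 * instrument and its format, immediately followed by the format-specific
 * instrument data that is handed to ops->put():
 *
 *	ev.flags        |= SNDRV_SEQ_EVENT_LENGTH_VARUSR;
 *	ev.data.ext.ptr  = buffer;           header + raw instrument data
 *	ev.data.ext.len  = sizeof(struct snd_seq_instr_header) + data_len;
 */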
static int instr_get(struct snd_seq_kinstr_ops *ops,
		     struct snd_seq_kinstr_list *list,
		     struct snd_seq_event *ev,
		     int atomic, int hop)
{
	return -ENXIO;
}
static int instr_free(struct snd_seq_kinstr_ops *ops,
		      struct snd_seq_kinstr_list *list,
		      struct snd_seq_event *ev,
		      int atomic, int hop)
{
	struct snd_seq_instr_header ifree;
	struct snd_seq_kinstr *instr, *prev;
	int result = -EINVAL;
	unsigned long flags;
	unsigned int hash;

	if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
		goto __return;

	if (ev->data.ext.len < sizeof(struct snd_seq_instr_header))
		goto __return;
	if (copy_from_user(&ifree, (void __user *)ev->data.ext.ptr,
			   sizeof(struct snd_seq_instr_header))) {
		result = -EFAULT;
		goto __return;
	}
	if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_ALL ||
	    ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE ||
	    ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER) {
		result = snd_seq_instr_list_free_cond(list, &ifree, ev->dest.client, atomic);
		goto __return;
	}
	if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_SINGLE) {
		if (ifree.id.instr.std & 0xff000000) {	/* private instrument */
			ifree.id.instr.std &= 0x00ffffff;
			ifree.id.instr.std |= (unsigned int)ev->source.client << 24;
		}
		hash = compute_hash_instr_key(&ifree.id.instr);
		snd_instr_lock_ops(list);
		spin_lock_irqsave(&list->lock, flags);
		instr = list->hash[hash];
		prev = NULL;
		while (instr) {
			if (!compare_instr(&instr->instr, &ifree.id.instr, 1))
				goto __free_single;
			prev = instr;
			instr = instr->next;
		}
		result = -ENOENT;
		spin_unlock_irqrestore(&list->lock, flags);
		snd_instr_unlock_ops(list);
		goto __return;

	      __free_single:
		if (prev)
			prev->next = instr->next;
		else
			list->hash[hash] = instr->next;
		if (instr->ops && instr->ops->notify)
			instr->ops->notify(instr->ops->private_data, instr,
					   SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
		/* wait until the instrument is no longer in use */
		while (instr->use) {
			spin_unlock_irqrestore(&list->lock, flags);
			schedule_timeout_uninterruptible(1);
			spin_lock_irqsave(&list->lock, flags);
		}
		spin_unlock_irqrestore(&list->lock, flags);
		result = snd_seq_instr_free(instr, atomic);
		snd_instr_unlock_ops(list);
	}

      __return:
	instr_result(ev, SNDRV_SEQ_EVENT_INSTR_FREE, result, atomic);
	return result;
}
static int instr_list(struct snd_seq_kinstr_ops *ops,
		      struct snd_seq_kinstr_list *list,
		      struct snd_seq_event *ev,
		      int atomic, int hop)
{
	return -ENXIO;
}
static int instr_cluster(struct snd_seq_kinstr_ops *ops,
			 struct snd_seq_kinstr_list *list,
			 struct snd_seq_event *ev,
			 int atomic, int hop)
{
	return -ENXIO;
}
int snd_seq_instr_event(struct snd_seq_kinstr_ops *ops,
			struct snd_seq_kinstr_list *list,
			struct snd_seq_event *ev,
			int client, int atomic, int hop)
{
	int direct = 0;

	snd_assert(ops != NULL && list != NULL && ev != NULL, return -EINVAL);
	if (snd_seq_ev_is_direct(ev)) {
		direct = 1;
		switch (ev->type) {
		case SNDRV_SEQ_EVENT_INSTR_BEGIN:
			return instr_begin(ops, list, ev, atomic, hop);
		case SNDRV_SEQ_EVENT_INSTR_END:
			return instr_end(ops, list, ev, atomic, hop);
		}
	}
	if ((list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT) && !direct)
		return -EINVAL;
	switch (ev->type) {
	case SNDRV_SEQ_EVENT_INSTR_INFO:
		return instr_info(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_FINFO:
		return instr_format_info(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_RESET:
		return instr_reset(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_STATUS:
		return instr_status(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_PUT:
		return instr_put(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_GET:
		return instr_get(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_FREE:
		return instr_free(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_LIST:
		return instr_list(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_CLUSTER:
		return instr_cluster(ops, list, ev, atomic, hop);
	}
	return -EINVAL;
}
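/*
 * Wiring sketch (illustration only; synth_ops, synth->ilist and the callback
 * name are hypothetical): a synth driver forwards the instrument events it
 * receives on its kernel port to this dispatcher together with its own
 * format ops and instrument list:
 *
 *	static int my_port_event(struct snd_seq_event *ev, int direct,
 *				 void *private_data, int atomic, int hop)
 *	{
 *		struct my_synth *synth = private_data;
 *		return snd_seq_instr_event(&synth_ops, synth->ilist, ev,
 *					   synth->seq_client, atomic, hop);
 *	}
 */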
static int __init alsa_seq_instr_init(void)
{
	return 0;
}

static void __exit alsa_seq_instr_exit(void)
{
}

module_init(alsa_seq_instr_init)
module_exit(alsa_seq_instr_exit)
EXPORT_SYMBOL(snd_seq_instr_list_new);
EXPORT_SYMBOL(snd_seq_instr_list_free);
EXPORT_SYMBOL(snd_seq_instr_list_free_cond);
EXPORT_SYMBOL(snd_seq_instr_find);
EXPORT_SYMBOL(snd_seq_instr_free_use);
EXPORT_SYMBOL(snd_seq_instr_event);