/*
 * ALSA sequencer Client Manager
 * Copyright (c) 1998-2001 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                            Jaroslav Kysela <perex@perex.cz>
 *                            Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <linux/kmod.h>

#include <sound/seq_kernel.h>
#include "seq_clientmgr.h"
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_timer.h"
#include "seq_system.h"
#include <sound/seq_device.h>
#include <linux/compat.h>
/*
 * this module handles the connections of userland and kernel clients
 */

/*
 * There are four ranges of client numbers (last two shared):
 * 0..15: global clients
 * 16..127: statically allocated client numbers for cards 0..27
 * 128..191: dynamically allocated client numbers for cards 28..31
 * 128..191: dynamically allocated client numbers for applications
 */
/* number of kernel non-card clients */
#define SNDRV_SEQ_GLOBAL_CLIENTS	16
/* clients per cards, for static clients */
#define SNDRV_SEQ_CLIENTS_PER_CARD	4
/* dynamically allocated client numbers (both kernel drivers and user space) */
#define SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN	128

#define SNDRV_SEQ_LFLG_INPUT	0x0001
#define SNDRV_SEQ_LFLG_OUTPUT	0x0002
#define SNDRV_SEQ_LFLG_OPEN	(SNDRV_SEQ_LFLG_INPUT|SNDRV_SEQ_LFLG_OUTPUT)
static DEFINE_SPINLOCK(clients_lock);
static DEFINE_MUTEX(register_mutex);

static char clienttablock[SNDRV_SEQ_MAX_CLIENTS];
static struct snd_seq_client *clienttab[SNDRV_SEQ_MAX_CLIENTS];
static struct snd_seq_usage client_usage;
static int bounce_error_event(struct snd_seq_client *client,
			      struct snd_seq_event *event,
			      int err, int atomic, int hop);
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					int filter, int atomic, int hop);
static inline unsigned short snd_seq_file_flags(struct file *file)
{
	switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_WRITE:
		return SNDRV_SEQ_LFLG_OUTPUT;
	case FMODE_READ:
		return SNDRV_SEQ_LFLG_INPUT;
	default:
		return SNDRV_SEQ_LFLG_OPEN;
	}
}

static inline int snd_seq_write_pool_allocated(struct snd_seq_client *client)
{
	return snd_seq_total_cells(client->pool) > 0;
}
/* return pointer to client structure for specified id */
static struct snd_seq_client *clientptr(int clientid)
{
	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
		pr_debug("ALSA: seq: oops. Trying to get pointer to client %d\n",
			 clientid);
		return NULL;
	}
	return clienttab[clientid];
}
struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
{
	unsigned long flags;
	struct snd_seq_client *client;

	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
		pr_debug("ALSA: seq: oops. Trying to get pointer to client %d\n",
			 clientid);
		return NULL;
	}
	spin_lock_irqsave(&clients_lock, flags);
	client = clientptr(clientid);
	if (client)
		goto __lock;
	if (clienttablock[clientid]) {
		spin_unlock_irqrestore(&clients_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&clients_lock, flags);
#ifdef CONFIG_MODULES
	if (!in_interrupt()) {
		static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS];
		static char card_requested[SNDRV_CARDS];
		if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
			int idx;

			if (!client_requested[clientid]) {
				client_requested[clientid] = 1;
				for (idx = 0; idx < 15; idx++) {
					if (seq_client_load[idx] < 0)
						break;
					if (seq_client_load[idx] == clientid) {
						request_module("snd-seq-client-%i",
							       clientid);
						break;
					}
				}
			}
		} else if (clientid < SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN) {
			int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) /
				SNDRV_SEQ_CLIENTS_PER_CARD;
			if (card < snd_ecards_limit) {
				if (! card_requested[card]) {
					card_requested[card] = 1;
					snd_request_card(card);
				}
				snd_seq_device_load_drivers();
			}
		}
		spin_lock_irqsave(&clients_lock, flags);
		client = clientptr(clientid);
		if (client)
			goto __lock;
		spin_unlock_irqrestore(&clients_lock, flags);
	}
#endif
	return NULL;

      __lock:
	snd_use_lock_use(&client->use_lock);
	spin_unlock_irqrestore(&clients_lock, flags);
	return client;
}
static void usage_alloc(struct snd_seq_usage *res, int num)
{
	res->cur += num;
	if (res->cur > res->peak)
		res->peak = res->cur;
}

static void usage_free(struct snd_seq_usage *res, int num)
{
	res->cur -= num;
}

/* initialise data structures */
int __init client_init_data(void)
{
	/* zap out the client table */
	memset(&clienttablock, 0, sizeof(clienttablock));
	memset(&clienttab, 0, sizeof(clienttab));
	return 0;
}
static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
{
	int c;
	unsigned long flags;
	struct snd_seq_client *client;

	/* init client data */
	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;
	client->pool = snd_seq_pool_new(poolsize);
	if (client->pool == NULL) {
		kfree(client);
		return NULL;
	}
	client->type = NO_CLIENT;
	snd_use_lock_init(&client->use_lock);
	rwlock_init(&client->ports_lock);
	mutex_init(&client->ports_mutex);
	INIT_LIST_HEAD(&client->ports_list_head);
	mutex_init(&client->ioctl_mutex);

	/* find free slot in the client table */
	spin_lock_irqsave(&clients_lock, flags);
	if (client_index < 0) {
		for (c = SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN;
		     c < SNDRV_SEQ_MAX_CLIENTS;
		     c++) {
			if (clienttab[c] || clienttablock[c])
				continue;
			clienttab[client->number = c] = client;
			spin_unlock_irqrestore(&clients_lock, flags);
			return client;
		}
	} else {
		if (clienttab[client_index] == NULL && !clienttablock[client_index]) {
			clienttab[client->number = client_index] = client;
			spin_unlock_irqrestore(&clients_lock, flags);
			return client;
		}
	}
	spin_unlock_irqrestore(&clients_lock, flags);
	snd_seq_pool_delete(&client->pool);
	kfree(client);
	return NULL;	/* no free slot found or busy, return failure code */
}
static int seq_free_client1(struct snd_seq_client *client)
{
	unsigned long flags;

	spin_lock_irqsave(&clients_lock, flags);
	clienttablock[client->number] = 1;
	clienttab[client->number] = NULL;
	spin_unlock_irqrestore(&clients_lock, flags);
	snd_seq_delete_all_ports(client);
	snd_seq_queue_client_leave(client->number);
	snd_use_lock_sync(&client->use_lock);
	snd_seq_queue_client_termination(client->number);
	snd_seq_pool_delete(&client->pool);
	spin_lock_irqsave(&clients_lock, flags);
	clienttablock[client->number] = 0;
	spin_unlock_irqrestore(&clients_lock, flags);
	return 0;
}
static void seq_free_client(struct snd_seq_client * client)
{
	mutex_lock(&register_mutex);
	switch (client->type) {
	case NO_CLIENT:
		pr_warn("ALSA: seq: Trying to free unused client %d\n",
			client->number);
		break;
	case USER_CLIENT:
	case KERNEL_CLIENT:
		seq_free_client1(client);
		usage_free(&client_usage, 1);
		break;
	default:
		pr_err("ALSA: seq: Trying to free client %d with undefined type = %d\n",
		       client->number, client->type);
	}
	mutex_unlock(&register_mutex);

	snd_seq_system_client_ev_client_exit(client->number);
}
/* -------------------------------------------------------- */

/* create a user client */
static int snd_seq_open(struct inode *inode, struct file *file)
{
	int c, mode;			/* client id */
	struct snd_seq_client *client;
	struct snd_seq_user_client *user;
	int err;

	err = nonseekable_open(inode, file);
	if (err < 0)
		return err;

	if (mutex_lock_interruptible(&register_mutex))
		return -ERESTARTSYS;
	client = seq_create_client1(-1, SNDRV_SEQ_DEFAULT_EVENTS);
	if (client == NULL) {
		mutex_unlock(&register_mutex);
		return -ENOMEM;	/* failure code */
	}

	mode = snd_seq_file_flags(file);
	if (mode & SNDRV_SEQ_LFLG_INPUT)
		client->accept_input = 1;
	if (mode & SNDRV_SEQ_LFLG_OUTPUT)
		client->accept_output = 1;

	user = &client->data.user;
	user->fifo = NULL;
	user->fifo_pool_size = 0;

	if (mode & SNDRV_SEQ_LFLG_INPUT) {
		user->fifo_pool_size = SNDRV_SEQ_DEFAULT_CLIENT_EVENTS;
		user->fifo = snd_seq_fifo_new(user->fifo_pool_size);
		if (user->fifo == NULL) {
			seq_free_client1(client);
			kfree(client);
			mutex_unlock(&register_mutex);
			return -ENOMEM;
		}
	}

	usage_alloc(&client_usage, 1);
	client->type = USER_CLIENT;
	mutex_unlock(&register_mutex);

	c = client->number;
	file->private_data = client;

	/* fill client data */
	sprintf(client->name, "Client-%d", c);
	client->data.user.owner = get_pid(task_pid(current));

	/* make others aware of this new client */
	snd_seq_system_client_ev_client_start(c);

	return 0;
}
/* delete a user client */
static int snd_seq_release(struct inode *inode, struct file *file)
{
	struct snd_seq_client *client = file->private_data;

	if (client) {
		seq_free_client(client);
		if (client->data.user.fifo)
			snd_seq_fifo_delete(&client->data.user.fifo);
		put_pid(client->data.user.owner);
		kfree(client);
	}

	return 0;
}
/* handle client read() */
/* possible error values:
 *	-ENXIO	invalid client or file open mode
 *	-ENOSPC	FIFO overflow (the flag is cleared after this error report)
 *	-EINVAL	not enough user-space buffer to write the whole event
 *	-EFAULT	seg. fault during copy to user space
 */
static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
			    loff_t *offset)
{
	struct snd_seq_client *client = file->private_data;
	struct snd_seq_fifo *fifo;
	int err;
	long result = 0;
	struct snd_seq_event_cell *cell;

	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT))
		return -ENXIO;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	/* check client structures are in place */
	if (snd_BUG_ON(!client))
		return -ENXIO;

	if (!client->accept_input || (fifo = client->data.user.fifo) == NULL)
		return -ENXIO;

	if (atomic_read(&fifo->overflow) > 0) {
		/* buffer overflow is detected */
		snd_seq_fifo_clear(fifo);
		/* return error code */
		return -ENOSPC;
	}

	cell = NULL;
	err = 0;
	snd_seq_fifo_lock(fifo);

	/* while data available in queue */
	while (count >= sizeof(struct snd_seq_event)) {
		int nonblock;

		nonblock = (file->f_flags & O_NONBLOCK) || result > 0;
		if ((err = snd_seq_fifo_cell_out(fifo, &cell, nonblock)) < 0)
			break;
		if (snd_seq_ev_is_variable(&cell->event)) {
			struct snd_seq_event tmpev;
			tmpev = cell->event;
			tmpev.data.ext.len &= ~SNDRV_SEQ_EXT_MASK;
			if (copy_to_user(buf, &tmpev, sizeof(struct snd_seq_event))) {
				err = -EFAULT;
				break;
			}
			count -= sizeof(struct snd_seq_event);
			buf += sizeof(struct snd_seq_event);
			err = snd_seq_expand_var_event(&cell->event, count,
						       (char __force *)buf, 0,
						       sizeof(struct snd_seq_event));
			if (err < 0)
				break;
			result += err;
			count -= err;
			buf += err;
		} else {
			if (copy_to_user(buf, &cell->event, sizeof(struct snd_seq_event))) {
				err = -EFAULT;
				break;
			}
			count -= sizeof(struct snd_seq_event);
			buf += sizeof(struct snd_seq_event);
		}
		snd_seq_cell_free(cell);
		cell = NULL; /* to be sure */
		result += sizeof(struct snd_seq_event);
	}

	if (err < 0) {
		if (cell)
			snd_seq_fifo_cell_putback(fifo, cell);
		if (err == -EAGAIN && result > 0)
			err = 0;
	}
	snd_seq_fifo_unlock(fifo);

	return (err < 0) ? err : result;
}
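/*
 * Illustrative user-space sketch (not part of the driver): reading one
 * event from the sequencer device as handled above.  The device path
 * and the trivial error handling are assumptions for the example only.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sound/asequencer.h>
 *
 *	int read_one_event(void)
 *	{
 *		struct snd_seq_event ev;
 *		int fd = open("/dev/snd/seq", O_RDONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (read(fd, &ev, sizeof(ev)) != sizeof(ev)) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return ev.type;
 *	}
 */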
/*
 * check access permission to the port
 */
static int check_port_perm(struct snd_seq_client_port *port, unsigned int flags)
{
	if ((port->capability & flags) != flags)
		return 0;
	return flags;
}

/*
 * check if the destination client is available, and return the pointer
 * if filter is non-zero, client filter bitmap is tested.
 */
static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event,
						    int filter)
{
	struct snd_seq_client *dest;

	dest = snd_seq_client_use_ptr(event->dest.client);
	if (dest == NULL)
		return NULL;
	if (! dest->accept_input)
		goto __not_avail;
	if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) &&
	    ! test_bit(event->type, dest->event_filter))
		goto __not_avail;
	if (filter && !(dest->filter & filter))
		goto __not_avail;

	return dest; /* ok - accessible */
__not_avail:
	snd_seq_client_unlock(dest);
	return NULL;
}
/*
 * Return the error event.
 *
 * If the receiver client is a user client, the original event is
 * encapsulated in SNDRV_SEQ_EVENT_BOUNCE as variable length event.  If
 * the original event is also variable length, the external data is
 * copied after the event record.
 * If the receiver client is a kernel client, the original event is
 * quoted in SNDRV_SEQ_EVENT_KERNEL_ERROR, since this requires no extra
 * kernel resource.
 */
static int bounce_error_event(struct snd_seq_client *client,
			      struct snd_seq_event *event,
			      int err, int atomic, int hop)
{
	struct snd_seq_event bounce_ev;
	int result;

	if (client == NULL ||
	    ! (client->filter & SNDRV_SEQ_FILTER_BOUNCE) ||
	    ! client->accept_input)
		return 0; /* ignored */

	/* set up quoted error */
	memset(&bounce_ev, 0, sizeof(bounce_ev));
	bounce_ev.type = SNDRV_SEQ_EVENT_KERNEL_ERROR;
	bounce_ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
	bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT;
	bounce_ev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
	bounce_ev.source.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
	bounce_ev.dest.client = client->number;
	bounce_ev.dest.port = event->source.port;
	bounce_ev.data.quote.origin = event->dest;
	bounce_ev.data.quote.event = event;
	bounce_ev.data.quote.value = -err; /* use positive value */
	result = snd_seq_deliver_single_event(NULL, &bounce_ev, 0, atomic, hop + 1);
	if (result < 0) {
		client->event_lost++;
		return result;
	}

	return result;
}
/*
 * rewrite the time-stamp of the event record with the current time
 * of the given queue.
 * return non-zero if updated.
 */
static int update_timestamp_of_queue(struct snd_seq_event *event,
				     int queue, int real_time)
{
	struct snd_seq_queue *q;

	q = queueptr(queue);
	if (q == NULL)
		return 0;
	event->queue = queue;
	event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
	if (real_time) {
		event->time.time = snd_seq_timer_get_cur_time(q->timer);
		event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
	} else {
		event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
		event->flags |= SNDRV_SEQ_TIME_STAMP_TICK;
	}
	queuefree(q);
	return 1;
}
/*
 * deliver an event to the specified destination.
 * if filter is non-zero, client filter bitmap is tested.
 *
 *  RETURN VALUE: 0 : if succeeded
 *		 <0 : error
 */
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					int filter, int atomic, int hop)
{
	struct snd_seq_client *dest = NULL;
	struct snd_seq_client_port *dest_port = NULL;
	int result = -ENOENT;
	int direct;

	direct = snd_seq_ev_is_direct(event);

	dest = get_event_dest_client(event, filter);
	if (dest == NULL)
		goto __skip;
	dest_port = snd_seq_port_use_ptr(dest, event->dest.port);
	if (dest_port == NULL)
		goto __skip;

	/* check permission */
	if (! check_port_perm(dest_port, SNDRV_SEQ_PORT_CAP_WRITE)) {
		result = -EPERM;
		goto __skip;
	}

	if (dest_port->timestamping)
		update_timestamp_of_queue(event, dest_port->time_queue,
					  dest_port->time_real);

	switch (dest->type) {
	case USER_CLIENT:
		if (dest->data.user.fifo)
			result = snd_seq_fifo_event_in(dest->data.user.fifo, event);
		break;

	case KERNEL_CLIENT:
		if (dest_port->event_input == NULL)
			break;
		result = dest_port->event_input(event, direct,
						dest_port->private_data,
						atomic, hop);
		break;
	default:
		break;
	}

  __skip:
	if (dest_port)
		snd_seq_port_unlock(dest_port);
	if (dest)
		snd_seq_client_unlock(dest);

	if (result < 0 && !direct) {
		result = bounce_error_event(client, event, result, atomic, hop);
	}
	return result;
}
/*
 * send the event to all subscribers:
 */
static int deliver_to_subscribers(struct snd_seq_client *client,
				  struct snd_seq_event *event,
				  int atomic, int hop)
{
	struct snd_seq_subscribers *subs;
	int err, result = 0, num_ev = 0;
	struct snd_seq_event event_saved;
	struct snd_seq_client_port *src_port;
	struct snd_seq_port_subs_info *grp;

	src_port = snd_seq_port_use_ptr(client, event->source.port);
	if (src_port == NULL)
		return -EINVAL; /* invalid source port */
	/* save original event record */
	event_saved = *event;
	grp = &src_port->c_src;

	/* lock list */
	if (atomic)
		read_lock(&grp->list_lock);
	else
		down_read_nested(&grp->list_mutex, hop);
	list_for_each_entry(subs, &grp->list_head, src_list) {
		/* both ports ready? */
		if (atomic_read(&subs->ref_count) != 2)
			continue;
		event->dest = subs->info.dest;
		if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
			/* convert time according to flag with subscription */
			update_timestamp_of_queue(event, subs->info.queue,
						  subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL);
		err = snd_seq_deliver_single_event(client, event,
						   0, atomic, hop);
		if (err < 0) {
			/* save first error that occurs and continue */
			if (!result)
				result = err;
			continue;
		}
		num_ev++;
		/* restore original event record */
		*event = event_saved;
	}
	if (atomic)
		read_unlock(&grp->list_lock);
	else
		up_read(&grp->list_mutex);
	*event = event_saved; /* restore */
	snd_seq_port_unlock(src_port);
	return (result < 0) ? result : num_ev;
}
#ifdef SUPPORT_BROADCAST
/*
 * broadcast to all ports:
 */
static int port_broadcast_event(struct snd_seq_client *client,
				struct snd_seq_event *event,
				int atomic, int hop)
{
	int num_ev = 0, err, result = 0;
	struct snd_seq_client *dest_client;
	struct snd_seq_client_port *port;

	dest_client = get_event_dest_client(event, SNDRV_SEQ_FILTER_BROADCAST);
	if (dest_client == NULL)
		return 0; /* no matching destination */

	read_lock(&dest_client->ports_lock);
	list_for_each_entry(port, &dest_client->ports_list_head, list) {
		event->dest.port = port->addr.port;
		/* pass NULL as source client to avoid error bounce */
		err = snd_seq_deliver_single_event(NULL, event,
						   SNDRV_SEQ_FILTER_BROADCAST,
						   atomic, hop);
		if (err < 0) {
			/* save first error that occurs and continue */
			if (!result)
				result = err;
			continue;
		}
		num_ev++;
	}
	read_unlock(&dest_client->ports_lock);
	snd_seq_client_unlock(dest_client);
	event->dest.port = SNDRV_SEQ_ADDRESS_BROADCAST; /* restore */
	return (result < 0) ? result : num_ev;
}
/*
 * send the event to all clients:
 * if destination port is also ADDRESS_BROADCAST, deliver to all ports.
 */
static int broadcast_event(struct snd_seq_client *client,
			   struct snd_seq_event *event, int atomic, int hop)
{
	int err, result = 0, num_ev = 0;
	int dest;
	struct snd_seq_addr addr;

	addr = event->dest; /* save */

	for (dest = 0; dest < SNDRV_SEQ_MAX_CLIENTS; dest++) {
		/* don't send to itself */
		if (dest == client->number)
			continue;
		event->dest.client = dest;
		event->dest.port = addr.port;
		if (addr.port == SNDRV_SEQ_ADDRESS_BROADCAST)
			err = port_broadcast_event(client, event, atomic, hop);
		else
			/* pass NULL as source client to avoid error bounce */
			err = snd_seq_deliver_single_event(NULL, event,
							   SNDRV_SEQ_FILTER_BROADCAST,
							   atomic, hop);
		if (err < 0) {
			/* save first error that occurs and continue */
			if (!result)
				result = err;
			continue;
		}
		num_ev += err;
	}
	event->dest = addr; /* restore */
	return (result < 0) ? result : num_ev;
}
/* multicast - not supported yet */
static int multicast_event(struct snd_seq_client *client, struct snd_seq_event *event,
			   int atomic, int hop)
{
	pr_debug("ALSA: seq: multicast not supported yet.\n");
	return 0; /* ignored */
}
#endif /* SUPPORT_BROADCAST */
/* deliver an event to the destination port(s).
 * if the event is to subscribers or broadcast, the event is dispatched
 * to multiple targets.
 *
 * RETURN VALUE: n > 0  : the number of delivered events.
 *		 n == 0 : the event was not passed to any client.
 *		 n < 0  : error - event was not processed.
 */
static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event,
				 int atomic, int hop)
{
	int result;

	hop++;
	if (hop >= SNDRV_SEQ_MAX_HOPS) {
		pr_debug("ALSA: seq: too long delivery path (%d:%d->%d:%d)\n",
			 event->source.client, event->source.port,
			 event->dest.client, event->dest.port);
		return -EMLINK;
	}

	if (snd_seq_ev_is_variable(event) &&
	    snd_BUG_ON(atomic && (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR)))
		return -EINVAL;

	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS ||
	    event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS)
		result = deliver_to_subscribers(client, event, atomic, hop);
#ifdef SUPPORT_BROADCAST
	else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST ||
		 event->dest.client == SNDRV_SEQ_ADDRESS_BROADCAST)
		result = broadcast_event(client, event, atomic, hop);
	else if (event->dest.client >= SNDRV_SEQ_MAX_CLIENTS)
		result = multicast_event(client, event, atomic, hop);
	else if (event->dest.port == SNDRV_SEQ_ADDRESS_BROADCAST)
		result = port_broadcast_event(client, event, atomic, hop);
#endif
	else
		result = snd_seq_deliver_single_event(client, event, 0, atomic, hop);

	return result;
}
/*
 * dispatch an event cell:
 * This function is called only from queue check routines in timer
 * interrupts or after enqueued.
 * The event cell shall be released or re-queued in this function.
 *
 * RETURN VALUE: n > 0  : the number of delivered events.
 *		 n == 0 : the event was not passed to any client.
 *		 n < 0  : error - event was not processed.
 */
int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
	struct snd_seq_client *client;
	int result;

	if (snd_BUG_ON(!cell))
		return -EINVAL;

	client = snd_seq_client_use_ptr(cell->event.source.client);
	if (client == NULL) {
		snd_seq_cell_free(cell); /* release this cell */
		return -EINVAL;
	}

	if (cell->event.type == SNDRV_SEQ_EVENT_NOTE) {
		/* NOTE event:
		 * the event cell is re-used as a NOTE-OFF event and
		 * enqueued again.
		 */
		struct snd_seq_event tmpev, *ev;

		/* reserve this event to enqueue note-off later */
		tmpev = cell->event;
		tmpev.type = SNDRV_SEQ_EVENT_NOTEON;
		result = snd_seq_deliver_event(client, &tmpev, atomic, hop);

		/*
		 * This was originally a note event.  We now re-use the
		 * cell for the note-off event.
		 */
		ev = &cell->event;
		ev->type = SNDRV_SEQ_EVENT_NOTEOFF;
		ev->flags |= SNDRV_SEQ_PRIORITY_HIGH;

		/* add the duration time */
		switch (ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) {
		case SNDRV_SEQ_TIME_STAMP_TICK:
			ev->time.tick += ev->data.note.duration;
			break;
		case SNDRV_SEQ_TIME_STAMP_REAL:
			/* unit for duration is ms */
			ev->time.time.tv_nsec += 1000000 * (ev->data.note.duration % 1000);
			ev->time.time.tv_sec += ev->data.note.duration / 1000 +
						ev->time.time.tv_nsec / 1000000000;
			ev->time.time.tv_nsec %= 1000000000;
			break;
		}
		ev->data.note.velocity = ev->data.note.off_velocity;

		/* Now queue this cell as the note off event */
		if (snd_seq_enqueue_event(cell, atomic, hop) < 0)
			snd_seq_cell_free(cell); /* release this cell */

	} else {
		/* Normal events:
		 * event cell is freed after processing the event
		 */
		result = snd_seq_deliver_event(client, &cell->event, atomic, hop);
		snd_seq_cell_free(cell);
	}

	snd_seq_client_unlock(client);
	return result;
}
/* Allocate a cell from client pool and enqueue it to queue:
 * if pool is empty and blocking is TRUE, sleep until a new cell is
 * available.
 */
static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					struct file *file, int blocking,
					int atomic, int hop,
					struct mutex *mutexp)
{
	struct snd_seq_event_cell *cell;
	int err;

	/* special queue values - force direct passing */
	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
		event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
		event->queue = SNDRV_SEQ_QUEUE_DIRECT;
	}
#ifdef SUPPORT_BROADCAST
	if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) {
		event->dest.client = SNDRV_SEQ_ADDRESS_BROADCAST;
		event->queue = SNDRV_SEQ_QUEUE_DIRECT;
	}
#endif
	if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
		/* check presence of source port */
		struct snd_seq_client_port *src_port = snd_seq_port_use_ptr(client, event->source.port);
		if (src_port == NULL)
			return -EINVAL;
		snd_seq_port_unlock(src_port);
	}

	/* direct event processing without enqueued */
	if (snd_seq_ev_is_direct(event)) {
		if (event->type == SNDRV_SEQ_EVENT_NOTE)
			return -EINVAL; /* this event must be enqueued! */
		return snd_seq_deliver_event(client, event, atomic, hop);
	}

	/* Not direct, normal queuing */
	if (snd_seq_queue_is_used(event->queue, client->number) <= 0)
		return -EINVAL;  /* invalid queue */
	if (! snd_seq_write_pool_allocated(client))
		return -ENXIO; /* queue is not allocated */

	/* allocate an event cell */
	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
				file, mutexp);
	if (err < 0)
		return err;

	/* we got a cell. enqueue it. */
	if ((err = snd_seq_enqueue_event(cell, atomic, hop)) < 0) {
		snd_seq_cell_free(cell);
		return err;
	}

	return 0;
}
/*
 * check validity of event type and data length.
 * return non-zero if invalid.
 */
static int check_event_type_and_length(struct snd_seq_event *ev)
{
	switch (snd_seq_ev_length_type(ev)) {
	case SNDRV_SEQ_EVENT_LENGTH_FIXED:
		if (snd_seq_ev_is_variable_type(ev))
			return -EINVAL;
		break;
	case SNDRV_SEQ_EVENT_LENGTH_VARIABLE:
		if (! snd_seq_ev_is_variable_type(ev) ||
		    (ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK) >= SNDRV_SEQ_MAX_EVENT_LEN)
			return -EINVAL;
		break;
	case SNDRV_SEQ_EVENT_LENGTH_VARUSR:
		if (! snd_seq_ev_is_direct(ev))
			return -EINVAL;
		break;
	}
	return 0;
}
/* handle client write() */
/* possible error values:
 *	-ENXIO	invalid client or file open mode
 *	-ENOMEM	malloc failed
 *	-EFAULT	seg. fault during copy from user space
 *	-EINVAL	invalid event
 *	-EAGAIN	no space in output pool
 *	-EINTR	interrupted while sleeping
 *	-EMLINK	too many hops
 *	others	depends on return value from driver callback
 */
static ssize_t snd_seq_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *offset)
{
	struct snd_seq_client *client = file->private_data;
	int written = 0, len;
	int err;
	struct snd_seq_event event;

	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
		return -ENXIO;

	/* check client structures are in place */
	if (snd_BUG_ON(!client))
		return -ENXIO;

	if (!client->accept_output || client->pool == NULL)
		return -ENXIO;

	/* allocate the pool now if the pool is not allocated yet */
	mutex_lock(&client->ioctl_mutex);
	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
		err = snd_seq_pool_init(client->pool);
		if (err < 0)
			goto out;
	}

	/* only process whole events */
	err = -EINVAL;
	while (count >= sizeof(struct snd_seq_event)) {
		/* Read in the event header from the user */
		len = sizeof(event);
		if (copy_from_user(&event, buf, len)) {
			err = -EFAULT;
			break;
		}
		event.source.client = client->number;	/* fill in client number */
		/* Check for extension data length */
		if (check_event_type_and_length(&event)) {
			err = -EINVAL;
			break;
		}

		/* check for special events */
		if (event.type == SNDRV_SEQ_EVENT_NONE)
			goto __skip_event;
		else if (snd_seq_ev_is_reserved(&event)) {
			err = -EINVAL;
			break;
		}

		if (snd_seq_ev_is_variable(&event)) {
			int extlen = event.data.ext.len & ~SNDRV_SEQ_EXT_MASK;
			if ((size_t)(extlen + len) > count) {
				/* back out, will get an error this time or next */
				err = -EINVAL;
				break;
			}
			/* set user space pointer */
			event.data.ext.len = extlen | SNDRV_SEQ_EXT_USRPTR;
			event.data.ext.ptr = (char __force *)buf
						+ sizeof(struct snd_seq_event);
			len += extlen; /* increment data length */
		} else {
#ifdef CONFIG_COMPAT
			if (client->convert32 && snd_seq_ev_is_varusr(&event)) {
				void *ptr = (void __force *)compat_ptr(event.data.raw32.d[1]);
				event.data.ext.ptr = ptr;
			}
#endif
		}

		/* ok, enqueue it */
		err = snd_seq_client_enqueue_event(client, &event, file,
						   !(file->f_flags & O_NONBLOCK),
						   0, 0, &client->ioctl_mutex);
		if (err < 0)
			break;

	__skip_event:
		/* Update pointers and counts */
		count -= len;
		buf += len;
		written += len;
	}

 out:
	mutex_unlock(&client->ioctl_mutex);
	return written ? written : err;
}
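/*
 * Illustrative user-space sketch (not part of the driver): writing one
 * fixed-length event to the sequencer device as handled above.  The
 * destination address and note values are assumptions for the example;
 * fd is an already-open descriptor on /dev/snd/seq.
 *
 *	struct snd_seq_event ev;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.type = SNDRV_SEQ_EVENT_NOTEON;
 *	ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
 *	ev.queue = SNDRV_SEQ_QUEUE_DIRECT;	direct delivery, no queueing
 *	ev.dest.client = 128;			example destination client
 *	ev.dest.port = 0;
 *	ev.data.note.channel = 0;
 *	ev.data.note.note = 60;
 *	ev.data.note.velocity = 100;
 *	write(fd, &ev, sizeof(ev));
 */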
static __poll_t snd_seq_poll(struct file *file, poll_table * wait)
{
	struct snd_seq_client *client = file->private_data;
	__poll_t mask = 0;

	/* check client structures are in place */
	if (snd_BUG_ON(!client))
		return EPOLLERR;

	if ((snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT) &&
	    client->data.user.fifo) {

		/* check if data is available in the outqueue */
		if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait))
			mask |= EPOLLIN | EPOLLRDNORM;
	}

	if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) {

		/* check if data is available in the pool */
		if (!snd_seq_write_pool_allocated(client) ||
		    snd_seq_pool_poll_wait(client->pool, file, wait))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}

	return mask;
}
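/*
 * Illustrative user-space sketch (not part of the driver): waiting for
 * the next readable event with poll(2) on the sequencer descriptor,
 * matching the EPOLLIN reporting above.  fd and ev are assumed to exist.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(fd, &ev, sizeof(ev));
 */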
/*-----------------------------------------------------*/

static int snd_seq_ioctl_pversion(struct snd_seq_client *client, void *arg)
{
	int *pversion = arg;

	*pversion = SNDRV_SEQ_VERSION;
	return 0;
}

static int snd_seq_ioctl_client_id(struct snd_seq_client *client, void *arg)
{
	int *client_id = arg;

	*client_id = client->number;
	return 0;
}
/* SYSTEM_INFO ioctl() */
static int snd_seq_ioctl_system_info(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_system_info *info = arg;

	memset(info, 0, sizeof(*info));
	/* fill the info fields */
	info->queues = SNDRV_SEQ_MAX_QUEUES;
	info->clients = SNDRV_SEQ_MAX_CLIENTS;
	info->ports = SNDRV_SEQ_MAX_PORTS;
	info->channels = 256;	/* fixed limit */
	info->cur_clients = client_usage.cur;
	info->cur_queues = snd_seq_queue_get_cur_queues();

	return 0;
}
/* RUNNING_MODE ioctl() */
static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_running_info *info = arg;
	struct snd_seq_client *cptr;
	int err = 0;

	/* requested client number */
	cptr = snd_seq_client_use_ptr(info->client);
	if (cptr == NULL)
		return -ENOENT;		/* don't change !!! */

#ifdef SNDRV_BIG_ENDIAN
	if (!info->big_endian) {
		err = -EINVAL;
		goto __err;
	}
#else
	if (info->big_endian) {
		err = -EINVAL;
		goto __err;
	}
#endif
	if (info->cpu_mode > sizeof(long)) {
		err = -EINVAL;
		goto __err;
	}
	cptr->convert32 = (info->cpu_mode < sizeof(long));
 __err:
	snd_seq_client_unlock(cptr);
	return err;
}
/* CLIENT_INFO ioctl() */
static void get_client_info(struct snd_seq_client *cptr,
			    struct snd_seq_client_info *info)
{
	info->client = cptr->number;

	/* fill the info fields */
	info->type = cptr->type;
	strcpy(info->name, cptr->name);
	info->filter = cptr->filter;
	info->event_lost = cptr->event_lost;
	memcpy(info->event_filter, cptr->event_filter, 32);
	info->num_ports = cptr->num_ports;

	if (cptr->type == USER_CLIENT)
		info->pid = pid_vnr(cptr->data.user.owner);
	else
		info->pid = -1;

	if (cptr->type == KERNEL_CLIENT)
		info->card = cptr->data.kernel.card ? cptr->data.kernel.card->number : -1;
	else
		info->card = -1;

	memset(info->reserved, 0, sizeof(info->reserved));
}
static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_client_info *client_info = arg;
	struct snd_seq_client *cptr;

	/* requested client number */
	cptr = snd_seq_client_use_ptr(client_info->client);
	if (cptr == NULL)
		return -ENOENT;		/* don't change !!! */

	get_client_info(cptr, client_info);
	snd_seq_client_unlock(cptr);

	return 0;
}
/* CLIENT_INFO ioctl() */
static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_client_info *client_info = arg;

	/* it is not allowed to set the info fields for another client */
	if (client->number != client_info->client)
		return -EPERM;
	/* also client type must be set now */
	if (client->type != client_info->type)
		return -EINVAL;

	/* fill the info fields */
	if (client_info->name[0])
		strlcpy(client->name, client_info->name, sizeof(client->name));

	client->filter = client_info->filter;
	client->event_lost = client_info->event_lost;
	memcpy(client->event_filter, client_info->event_filter, 32);

	return 0;
}
/*
 * CREATE PORT ioctl()
 */
static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_port_info *info = arg;
	struct snd_seq_client_port *port;
	struct snd_seq_port_callback *callback;
	int port_idx;

	/* it is not allowed to create the port for another client */
	if (info->addr.client != client->number)
		return -EPERM;

	port = snd_seq_create_port(client, (info->flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT) ? info->addr.port : -1);
	if (port == NULL)
		return -ENOMEM;

	if (client->type == USER_CLIENT && info->kernel) {
		port_idx = port->addr.port;
		snd_seq_port_unlock(port);
		snd_seq_delete_port(client, port_idx);
		return -EINVAL;
	}
	if (client->type == KERNEL_CLIENT) {
		if ((callback = info->kernel) != NULL) {
			if (callback->owner)
				port->owner = callback->owner;
			port->private_data = callback->private_data;
			port->private_free = callback->private_free;
			port->event_input = callback->event_input;
			port->c_src.open = callback->subscribe;
			port->c_src.close = callback->unsubscribe;
			port->c_dest.open = callback->use;
			port->c_dest.close = callback->unuse;
		}
	}

	info->addr = port->addr;

	snd_seq_set_port_info(port, info);
	snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
	snd_seq_port_unlock(port);

	return 0;
}
/*
 * DELETE PORT ioctl()
 */
static int snd_seq_ioctl_delete_port(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_port_info *info = arg;
	int err;

	/* it is not allowed to remove the port for another client */
	if (info->addr.client != client->number)
		return -EPERM;

	err = snd_seq_delete_port(client, info->addr.port);
	if (err >= 0)
		snd_seq_system_client_ev_port_exit(client->number, info->addr.port);
	return err;
}
/*
 * GET_PORT_INFO ioctl() (on any client)
 */
static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_port_info *info = arg;
	struct snd_seq_client *cptr;
	struct snd_seq_client_port *port;

	cptr = snd_seq_client_use_ptr(info->addr.client);
	if (cptr == NULL)
		return -ENXIO;

	port = snd_seq_port_use_ptr(cptr, info->addr.port);
	if (port == NULL) {
		snd_seq_client_unlock(cptr);
		return -ENOENT;			/* don't change */
	}

	/* get port info */
	snd_seq_get_port_info(port, info);
	snd_seq_port_unlock(port);
	snd_seq_client_unlock(cptr);

	return 0;
}
/*
 * SET_PORT_INFO ioctl() (only ports on this/own client)
 */
static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_port_info *info = arg;
	struct snd_seq_client_port *port;

	if (info->addr.client != client->number) /* only set our own ports ! */
		return -EPERM;
	port = snd_seq_port_use_ptr(client, info->addr.port);
	if (port) {
		snd_seq_set_port_info(port, info);
		snd_seq_port_unlock(port);
	}
	return 0;
}
/*
 * port subscription (connection)
 */
#define PERM_RD		(SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ)
#define PERM_WR		(SNDRV_SEQ_PORT_CAP_WRITE|SNDRV_SEQ_PORT_CAP_SUBS_WRITE)

static int check_subscription_permission(struct snd_seq_client *client,
					 struct snd_seq_client_port *sport,
					 struct snd_seq_client_port *dport,
					 struct snd_seq_port_subscribe *subs)
{
	if (client->number != subs->sender.client &&
	    client->number != subs->dest.client) {
		/* connection by third client - check export permission */
		if (check_port_perm(sport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
			return -EPERM;
		if (check_port_perm(dport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
			return -EPERM;
	}

	/* check read permission */
	/* if sender or receiver is the subscribing client itself,
	 * no permission check is necessary
	 */
	if (client->number != subs->sender.client) {
		if (! check_port_perm(sport, PERM_RD))
			return -EPERM;
	}
	/* check write permission */
	if (client->number != subs->dest.client) {
		if (! check_port_perm(dport, PERM_WR))
			return -EPERM;
	}
	return 0;
}
/*
 * send a subscription notify event to user client:
 * client must be user client.
 */
int snd_seq_client_notify_subscription(int client, int port,
				       struct snd_seq_port_subscribe *info,
				       int evtype)
{
	struct snd_seq_event event;

	memset(&event, 0, sizeof(event));
	event.type = evtype;
	event.data.connect.dest = info->dest;
	event.data.connect.sender = info->sender;

	return snd_seq_system_notify(client, port, &event);  /* non-atomic */
}
/*
 * add to port's subscription list IOCTL interface
 */
static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
					void *arg)
{
	struct snd_seq_port_subscribe *subs = arg;
	int result = -EINVAL;
	struct snd_seq_client *receiver = NULL, *sender = NULL;
	struct snd_seq_client_port *sport = NULL, *dport = NULL;

	if ((receiver = snd_seq_client_use_ptr(subs->dest.client)) == NULL)
		goto __end;
	if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
		goto __end;
	if ((dport = snd_seq_port_use_ptr(receiver, subs->dest.port)) == NULL)
		goto __end;

	result = check_subscription_permission(client, sport, dport, subs);
	if (result < 0)
		goto __end;

	/* connect them */
	result = snd_seq_port_connect(client, sender, sport, receiver, dport, subs);
	if (! result) /* broadcast announce */
		snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
						   subs, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED);
      __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (dport)
		snd_seq_port_unlock(dport);
	if (sender)
		snd_seq_client_unlock(sender);
	if (receiver)
		snd_seq_client_unlock(receiver);
	return result;
}
/*
 * remove from port's subscription list
 */
static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_port_subscribe *subs = arg;
	int result = -ENXIO;
	struct snd_seq_client *receiver = NULL, *sender = NULL;
	struct snd_seq_client_port *sport = NULL, *dport = NULL;

	if ((receiver = snd_seq_client_use_ptr(subs->dest.client)) == NULL)
		goto __end;
	if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
		goto __end;
	if ((dport = snd_seq_port_use_ptr(receiver, subs->dest.port)) == NULL)
		goto __end;

	result = check_subscription_permission(client, sport, dport, subs);
	if (result < 0)
		goto __end;

	result = snd_seq_port_disconnect(client, sender, sport, receiver, dport, subs);
	if (! result) /* broadcast announce */
		snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
						   subs, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED);
      __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (dport)
		snd_seq_port_unlock(dport);
	if (sender)
		snd_seq_client_unlock(sender);
	if (receiver)
		snd_seq_client_unlock(receiver);
	return result;
}
/* CREATE_QUEUE ioctl() */
static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_queue_info *info = arg;
	struct snd_seq_queue *q;

	q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
	if (IS_ERR(q))
		return PTR_ERR(q);

	info->queue = q->queue;
	info->locked = q->locked;
	info->owner = q->owner;

	/* set queue name */
	if (!info->name[0])
		snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
	strlcpy(q->name, info->name, sizeof(q->name));
	snd_use_lock_free(&q->use_lock);

	return 0;
}
/* DELETE_QUEUE ioctl() */
static int snd_seq_ioctl_delete_queue(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_queue_info *info = arg;

	return snd_seq_queue_delete(client->number, info->queue);
}
/* GET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_get_queue_info(struct snd_seq_client *client,
					void *arg)
{
	struct snd_seq_queue_info *info = arg;
	struct snd_seq_queue *q;

	q = queueptr(info->queue);
	if (q == NULL)
		return -EINVAL;

	memset(info, 0, sizeof(*info));
	info->queue = q->queue;
	info->owner = q->owner;
	info->locked = q->locked;
	strlcpy(info->name, q->name, sizeof(info->name));
	queuefree(q);

	return 0;
}
/* SET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
					void *arg)
{
	struct snd_seq_queue_info *info = arg;
	struct snd_seq_queue *q;

	if (info->owner != client->number)
		return -EINVAL;

	/* change owner/locked permission */
	if (snd_seq_queue_check_access(info->queue, client->number)) {
		if (snd_seq_queue_set_owner(info->queue, client->number, info->locked) < 0)
			return -EPERM;
		if (info->locked)
			snd_seq_queue_use(info->queue, client->number, 1);
	} else {
		return -EPERM;
	}

	q = queueptr(info->queue);
	if (! q)
		return -EINVAL;
	if (q->owner != client->number) {
		queuefree(q);
		return -EPERM;
	}
	strlcpy(q->name, info->name, sizeof(q->name));
	queuefree(q);

	return 0;
}
/* GET_NAMED_QUEUE ioctl() */
static int snd_seq_ioctl_get_named_queue(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_info *info = arg;
	struct snd_seq_queue *q;

	q = snd_seq_queue_find_name(info->name);
	if (q == NULL)
		return -EINVAL;
	info->queue = q->queue;
	info->owner = q->owner;
	info->locked = q->locked;
	queuefree(q);

	return 0;
}
/* GET_QUEUE_STATUS ioctl() */
static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_queue_status *status = arg;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	queue = queueptr(status->queue);
	if (queue == NULL)
		return -EINVAL;
	memset(status, 0, sizeof(*status));
	status->queue = queue->queue;

	tmr = queue->timer;
	status->events = queue->tickq->cells + queue->timeq->cells;

	status->time = snd_seq_timer_get_cur_time(tmr);
	status->tick = snd_seq_timer_get_cur_tick(tmr);

	status->running = tmr->running;

	status->flags = queue->flags;
	queuefree(queue);

	return 0;
}
/* GET_QUEUE_TEMPO ioctl() */
static int snd_seq_ioctl_get_queue_tempo(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_tempo *tempo = arg;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	queue = queueptr(tempo->queue);
	if (queue == NULL)
		return -EINVAL;
	memset(tempo, 0, sizeof(*tempo));
	tempo->queue = queue->queue;

	tmr = queue->timer;

	tempo->tempo = tmr->tempo;
	tempo->ppq = tmr->ppq;
	tempo->skew_value = tmr->skew;
	tempo->skew_base = tmr->skew_base;
	queuefree(queue);

	return 0;
}
/* SET_QUEUE_TEMPO ioctl() */
int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo)
{
	if (!snd_seq_queue_check_access(tempo->queue, client))
		return -EPERM;
	return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo);
}
EXPORT_SYMBOL(snd_seq_set_queue_tempo);

static int snd_seq_ioctl_set_queue_tempo(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_tempo *tempo = arg;
	int result;

	result = snd_seq_set_queue_tempo(client->number, tempo);
	return result < 0 ? result : 0;
}
/* GET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_get_queue_timer(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_timer *timer = arg;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	queue = queueptr(timer->queue);
	if (queue == NULL)
		return -EINVAL;

	if (mutex_lock_interruptible(&queue->timer_mutex)) {
		queuefree(queue);
		return -ERESTARTSYS;
	}
	tmr = queue->timer;
	memset(timer, 0, sizeof(*timer));
	timer->queue = queue->queue;

	timer->type = tmr->type;
	if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
		timer->u.alsa.id = tmr->alsa_id;
		timer->u.alsa.resolution = tmr->preferred_resolution;
	}
	mutex_unlock(&queue->timer_mutex);
	queuefree(queue);

	return 0;
}
/* SET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_timer *timer = arg;
	int result = 0;

	if (timer->type != SNDRV_SEQ_TIMER_ALSA)
		return -EINVAL;

	if (snd_seq_queue_check_access(timer->queue, client->number)) {
		struct snd_seq_queue *q;
		struct snd_seq_timer *tmr;

		q = queueptr(timer->queue);
		if (q == NULL)
			return -ENXIO;
		if (mutex_lock_interruptible(&q->timer_mutex)) {
			queuefree(q);
			return -ERESTARTSYS;
		}
		tmr = q->timer;
		snd_seq_queue_timer_close(timer->queue);
		tmr->type = timer->type;
		if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
			tmr->alsa_id = timer->u.alsa.id;
			tmr->preferred_resolution = timer->u.alsa.resolution;
		}
		result = snd_seq_queue_timer_open(timer->queue);
		mutex_unlock(&q->timer_mutex);
		queuefree(q);
	} else {
		return -EPERM;
	}

	return result;
}
/* GET_QUEUE_CLIENT ioctl() */
static int snd_seq_ioctl_get_queue_client(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_queue_client *info = arg;
	int used;

	used = snd_seq_queue_is_used(info->queue, client->number);
	if (used < 0)
		return -EINVAL;
	info->used = used;
	info->client = client->number;

	return 0;
}
/* SET_QUEUE_CLIENT ioctl() */
static int snd_seq_ioctl_set_queue_client(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_queue_client *info = arg;
	int err;

	if (info->used >= 0) {
		err = snd_seq_queue_use(info->queue, client->number, info->used);
		if (err < 0)
			return err;
	}

	return snd_seq_ioctl_get_queue_client(client, arg);
}
/* GET_CLIENT_POOL ioctl() */
static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_client_pool *info = arg;
	struct snd_seq_client *cptr;

	cptr = snd_seq_client_use_ptr(info->client);
	if (cptr == NULL)
		return -ENOENT;
	memset(info, 0, sizeof(*info));
	info->client = cptr->number;
	info->output_pool = cptr->pool->size;
	info->output_room = cptr->pool->room;
	info->output_free = info->output_pool;
	info->output_free = snd_seq_unused_cells(cptr->pool);
	if (cptr->type == USER_CLIENT) {
		info->input_pool = cptr->data.user.fifo_pool_size;
		info->input_free = info->input_pool;
		if (cptr->data.user.fifo)
			info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
	} else {
		info->input_pool = 0;
		info->input_free = 0;
	}
	snd_seq_client_unlock(cptr);

	return 0;
}
/* SET_CLIENT_POOL ioctl() */
static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_client_pool *info = arg;
	int rc;

	if (client->number != info->client)
		return -EINVAL; /* can't change other clients */

	if (info->output_pool >= 1 && info->output_pool <= SNDRV_SEQ_MAX_EVENTS &&
	    (! snd_seq_write_pool_allocated(client) ||
	     info->output_pool != client->pool->size)) {
		if (snd_seq_write_pool_allocated(client)) {
			/* is the pool in use? */
			if (atomic_read(&client->pool->counter))
				return -EBUSY;
			/* remove all existing cells */
			snd_seq_pool_mark_closing(client->pool);
			snd_seq_pool_done(client->pool);
		}
		client->pool->size = info->output_pool;
		rc = snd_seq_pool_init(client->pool);
		if (rc < 0)
			return rc;
	}
	if (client->type == USER_CLIENT && client->data.user.fifo != NULL &&
	    info->input_pool >= 1 &&
	    info->input_pool <= SNDRV_SEQ_MAX_CLIENT_EVENTS &&
	    info->input_pool != client->data.user.fifo_pool_size) {
		/* change pool size */
		rc = snd_seq_fifo_resize(client->data.user.fifo, info->input_pool);
		if (rc < 0)
			return rc;
		client->data.user.fifo_pool_size = info->input_pool;
	}
	if (info->output_room >= 1 &&
	    info->output_room <= client->pool->size) {
		client->pool->room  = info->output_room;
	}

	return snd_seq_ioctl_get_client_pool(client, arg);
}
/* REMOVE_EVENTS ioctl() */
static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
				       void *arg)
{
	struct snd_seq_remove_events *info = arg;

	/*
	 * Input mostly not implemented XXX.
	 */
	if (info->remove_mode & SNDRV_SEQ_REMOVE_INPUT) {
		/*
		 * No restrictions so for a user client we can clear
		 * the whole fifo
		 */
		if (client->type == USER_CLIENT && client->data.user.fifo)
			snd_seq_fifo_clear(client->data.user.fifo);
	}

	if (info->remove_mode & SNDRV_SEQ_REMOVE_OUTPUT)
		snd_seq_queue_remove_cells(client->number, info);

	return 0;
}
/*
 * get subscription info
 */
static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_port_subscribe *subs = arg;
	int result;
	struct snd_seq_client *sender = NULL;
	struct snd_seq_client_port *sport = NULL;
	struct snd_seq_subscribers *p;

	result = -EINVAL;
	if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
		goto __end;
	p = snd_seq_port_get_subscription(&sport->c_src, &subs->dest);
	if (p) {
		result = 0;
		*subs = p->info;
	} else
		result = -ENOENT;

      __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (sender)
		snd_seq_client_unlock(sender);

	return result;
}
/*
 * get subscription info - check only its presence
 */
static int snd_seq_ioctl_query_subs(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_query_subs *subs = arg;
	int result = -ENXIO;
	struct snd_seq_client *cptr = NULL;
	struct snd_seq_client_port *port = NULL;
	struct snd_seq_port_subs_info *group;
	struct list_head *p;
	int i;

	if ((cptr = snd_seq_client_use_ptr(subs->root.client)) == NULL)
		goto __end;
	if ((port = snd_seq_port_use_ptr(cptr, subs->root.port)) == NULL)
		goto __end;

	switch (subs->type) {
	case SNDRV_SEQ_QUERY_SUBS_READ:
		group = &port->c_src;
		break;
	case SNDRV_SEQ_QUERY_SUBS_WRITE:
		group = &port->c_dest;
		break;
	default:
		goto __end;
	}

	down_read(&group->list_mutex);
	/* search for the subscriber */
	subs->num_subs = group->count;
	i = 0;
	result = -ENOENT;
	list_for_each(p, &group->list_head) {
		if (i++ == subs->index) {
			/* found! */
			struct snd_seq_subscribers *s;
			if (subs->type == SNDRV_SEQ_QUERY_SUBS_READ) {
				s = list_entry(p, struct snd_seq_subscribers, src_list);
				subs->addr = s->info.dest;
			} else {
				s = list_entry(p, struct snd_seq_subscribers, dest_list);
				subs->addr = s->info.sender;
			}
			subs->flags = s->info.flags;
			subs->queue = s->info.queue;
			result = 0;
			break;
		}
	}
	up_read(&group->list_mutex);

      __end:
	if (port)
		snd_seq_port_unlock(port);
	if (cptr)
		snd_seq_client_unlock(cptr);

	return result;
}
static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
					   void *arg)
{
	struct snd_seq_client_info *info = arg;
	struct snd_seq_client *cptr = NULL;

	/* search for next client */
	info->client++;
	if (info->client < 0)
		info->client = 0;
	for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
		cptr = snd_seq_client_use_ptr(info->client);
		if (cptr)
			break; /* found */
	}
	if (cptr == NULL)
		return -ENOENT;

	get_client_info(cptr, info);
	snd_seq_client_unlock(cptr);

	return 0;
}
static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_port_info *info = arg;
	struct snd_seq_client *cptr;
	struct snd_seq_client_port *port = NULL;

	cptr = snd_seq_client_use_ptr(info->addr.client);
	if (cptr == NULL)
		return -ENXIO;

	/* search for next port */
	info->addr.port++;
	port = snd_seq_port_query_nearest(cptr, info);
	if (port == NULL) {
		snd_seq_client_unlock(cptr);
		return -ENOENT;
	}

	/* get port info */
	info->addr = port->addr;
	snd_seq_get_port_info(port, info);
	snd_seq_port_unlock(port);
	snd_seq_client_unlock(cptr);

	return 0;
}
/* -------------------------------------------------------- */

static const struct ioctl_handler {
	unsigned int cmd;
	int (*func)(struct snd_seq_client *client, void *arg);
} ioctl_handlers[] = {
	{ SNDRV_SEQ_IOCTL_PVERSION, snd_seq_ioctl_pversion },
	{ SNDRV_SEQ_IOCTL_CLIENT_ID, snd_seq_ioctl_client_id },
	{ SNDRV_SEQ_IOCTL_SYSTEM_INFO, snd_seq_ioctl_system_info },
	{ SNDRV_SEQ_IOCTL_RUNNING_MODE, snd_seq_ioctl_running_mode },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_INFO, snd_seq_ioctl_get_client_info },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_INFO, snd_seq_ioctl_set_client_info },
	{ SNDRV_SEQ_IOCTL_CREATE_PORT, snd_seq_ioctl_create_port },
	{ SNDRV_SEQ_IOCTL_DELETE_PORT, snd_seq_ioctl_delete_port },
	{ SNDRV_SEQ_IOCTL_GET_PORT_INFO, snd_seq_ioctl_get_port_info },
	{ SNDRV_SEQ_IOCTL_SET_PORT_INFO, snd_seq_ioctl_set_port_info },
	{ SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT, snd_seq_ioctl_subscribe_port },
	{ SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT, snd_seq_ioctl_unsubscribe_port },
	{ SNDRV_SEQ_IOCTL_CREATE_QUEUE, snd_seq_ioctl_create_queue },
	{ SNDRV_SEQ_IOCTL_DELETE_QUEUE, snd_seq_ioctl_delete_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_INFO, snd_seq_ioctl_get_queue_info },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_INFO, snd_seq_ioctl_set_queue_info },
	{ SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE, snd_seq_ioctl_get_named_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS, snd_seq_ioctl_get_queue_status },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO, snd_seq_ioctl_get_queue_tempo },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO, snd_seq_ioctl_set_queue_tempo },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER, snd_seq_ioctl_get_queue_timer },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER, snd_seq_ioctl_set_queue_timer },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT, snd_seq_ioctl_get_queue_client },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT, snd_seq_ioctl_set_queue_client },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, snd_seq_ioctl_get_client_pool },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, snd_seq_ioctl_set_client_pool },
	{ SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION, snd_seq_ioctl_get_subscription },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT, snd_seq_ioctl_query_next_client },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, snd_seq_ioctl_query_next_port },
	{ SNDRV_SEQ_IOCTL_REMOVE_EVENTS, snd_seq_ioctl_remove_events },
	{ SNDRV_SEQ_IOCTL_QUERY_SUBS, snd_seq_ioctl_query_subs },
	{ 0, NULL },
};
static long snd_seq_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct snd_seq_client *client = file->private_data;
	/* To use kernel stack for ioctl data. */
	union {
		int pversion;
		int client_id;
		struct snd_seq_system_info	system_info;
		struct snd_seq_running_info	running_info;
		struct snd_seq_client_info	client_info;
		struct snd_seq_port_info	port_info;
		struct snd_seq_port_subscribe	port_subscribe;
		struct snd_seq_queue_info	queue_info;
		struct snd_seq_queue_status	queue_status;
		struct snd_seq_queue_tempo	tempo;
		struct snd_seq_queue_timer	queue_timer;
		struct snd_seq_queue_client	queue_client;
		struct snd_seq_client_pool	client_pool;
		struct snd_seq_remove_events	remove_events;
		struct snd_seq_query_subs	query_subs;
	} buf;
	const struct ioctl_handler *handler;
	unsigned long size;
	int err;

	if (snd_BUG_ON(!client))
		return -ENXIO;

	for (handler = ioctl_handlers; handler->cmd > 0; ++handler) {
		if (handler->cmd == cmd)
			break;
	}
	if (handler->cmd == 0)
		return -ENOTTY;

	memset(&buf, 0, sizeof(buf));

	/*
	 * All ioctl commands for ALSA sequencer get an argument of size
	 * within 13 bits. We can safely pick up the size from the command.
	 */
	size = _IOC_SIZE(handler->cmd);
	if (handler->cmd & IOC_IN) {
		if (copy_from_user(&buf, (const void __user *)arg, size))
			return -EFAULT;
	}

	mutex_lock(&client->ioctl_mutex);
	err = handler->func(client, &buf);
	mutex_unlock(&client->ioctl_mutex);
	if (err >= 0) {
		/* Some commands include a bug in the 'dir' field. */
		if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
		    handler->cmd == SNDRV_SEQ_IOCTL_SET_CLIENT_POOL ||
		    (handler->cmd & IOC_OUT))
			if (copy_to_user((void __user *)arg, &buf, size))
				return -EFAULT;
	}

	return err;
}
#ifdef CONFIG_COMPAT
#include "seq_compat.c"
#else
#define snd_seq_ioctl_compat	NULL
#endif
/* -------------------------------------------------------- */

/* exported to kernel modules */
int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
				 const char *name_fmt, ...)
{
	struct snd_seq_client *client;
	va_list args;

	if (snd_BUG_ON(in_interrupt()))
		return -EBUSY;

	if (card && client_index >= SNDRV_SEQ_CLIENTS_PER_CARD)
		return -EINVAL;
	if (card == NULL && client_index >= SNDRV_SEQ_GLOBAL_CLIENTS)
		return -EINVAL;

	if (mutex_lock_interruptible(&register_mutex))
		return -ERESTARTSYS;

	if (card) {
		client_index += SNDRV_SEQ_GLOBAL_CLIENTS
			+ card->number * SNDRV_SEQ_CLIENTS_PER_CARD;
		if (client_index >= SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN)
			client_index = -1;
	}

	/* empty write queue as default */
	client = seq_create_client1(client_index, 0);
	if (client == NULL) {
		mutex_unlock(&register_mutex);
		return -EBUSY;	/* failure code */
	}
	usage_alloc(&client_usage, 1);

	client->accept_input = 1;
	client->accept_output = 1;
	client->data.kernel.card = card;

	va_start(args, name_fmt);
	vsnprintf(client->name, sizeof(client->name), name_fmt, args);
	va_end(args);

	client->type = KERNEL_CLIENT;
	mutex_unlock(&register_mutex);

	/* make others aware of this new client */
	snd_seq_system_client_ev_client_start(client->number);

	/* return client number to caller */
	return client->number;
}
EXPORT_SYMBOL(snd_seq_create_kernel_client);
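/*
 * Illustrative sketch (not part of this file): how a driver would
 * typically use the exported API above, e.g. from its probe path.  The
 * card pointer and client name are assumptions for the example only.
 *
 *	int my_client;
 *
 *	my_client = snd_seq_create_kernel_client(card, 0, "My Driver");
 *	if (my_client < 0)
 *		return my_client;	negative error code, no client created
 *	ports are then created via snd_seq_kernel_client_ctl(), see below
 */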
/* exported to kernel modules */
int snd_seq_delete_kernel_client(int client)
{
	struct snd_seq_client *ptr;

	if (snd_BUG_ON(in_interrupt()))
		return -EBUSY;

	ptr = clientptr(client);
	if (ptr == NULL)
		return -EINVAL;

	seq_free_client(ptr);
	kfree(ptr);
	return 0;
}
EXPORT_SYMBOL(snd_seq_delete_kernel_client);
/* skeleton to enqueue event, called from snd_seq_kernel_client_enqueue
 * and snd_seq_kernel_client_enqueue_blocking
 */
static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
				 struct file *file, int blocking,
				 int atomic, int hop)
{
	struct snd_seq_client *cptr;
	int result;

	if (snd_BUG_ON(!ev))
		return -EINVAL;

	if (ev->type == SNDRV_SEQ_EVENT_NONE)
		return 0; /* ignore this */
	if (ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR)
		return -EINVAL; /* quoted events can't be enqueued */

	/* fill in client number */
	ev->source.client = client;

	if (check_event_type_and_length(ev))
		return -EINVAL;

	cptr = snd_seq_client_use_ptr(client);
	if (cptr == NULL)
		return -EINVAL;

	if (! cptr->accept_output)
		result = -EPERM;
	else /* send it */
		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
						      atomic, hop, NULL);

	snd_seq_client_unlock(cptr);
	return result;
}

/*
 * exported, called by kernel clients to enqueue events (w/o blocking)
 *
 * RETURN VALUE: zero if succeeded, negative if error
 */
int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event * ev,
				  int atomic, int hop)
{
	return kernel_client_enqueue(client, ev, NULL, 0, atomic, hop);
}
EXPORT_SYMBOL(snd_seq_kernel_client_enqueue);
/*
 * exported, called by kernel clients to enqueue events (with blocking)
 *
 * RETURN VALUE: zero if succeeded, negative if error
 */
int snd_seq_kernel_client_enqueue_blocking(int client, struct snd_seq_event * ev,
					   struct file *file,
					   int atomic, int hop)
{
	return kernel_client_enqueue(client, ev, file, 1, atomic, hop);
}
EXPORT_SYMBOL(snd_seq_kernel_client_enqueue_blocking);
/*
 * exported, called by kernel clients to dispatch events directly to other
 * clients, bypassing the queues.  Event time-stamp will be updated.
 *
 * RETURN VALUE: negative = delivery failed,
 *		 zero, or positive: the number of delivered events
 */
int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event * ev,
				   int atomic, int hop)
{
	struct snd_seq_client *cptr;
	int result;

	if (snd_BUG_ON(!ev))
		return -EINVAL;

	/* fill in client number */
	ev->queue = SNDRV_SEQ_QUEUE_DIRECT;
	ev->source.client = client;

	if (check_event_type_and_length(ev))
		return -EINVAL;

	cptr = snd_seq_client_use_ptr(client);
	if (cptr == NULL)
		return -EINVAL;

	if (!cptr->accept_output)
		result = -EPERM;
	else /* send it */
		result = snd_seq_deliver_event(cptr, ev, atomic, hop);

	snd_seq_client_unlock(cptr);
	return result;
}
EXPORT_SYMBOL(snd_seq_kernel_client_dispatch);
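/*
 * Illustrative sketch (not part of this file): a kernel client created
 * with snd_seq_create_kernel_client() could deliver an event to its
 * subscribers roughly like this; the port number and note values are
 * assumptions for the example only.
 *
 *	struct snd_seq_event ev;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.type = SNDRV_SEQ_EVENT_NOTEON;
 *	ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
 *	ev.source.port = 0;
 *	ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
 *	ev.data.note.note = 60;
 *	ev.data.note.velocity = 100;
 *	snd_seq_kernel_client_dispatch(my_client, &ev, 0, 0);	atomic=0, hop=0
 */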
/**
 * snd_seq_kernel_client_ctl - operate a command for a client with data in
 *			       kernel space.
 * @clientid:	A numerical ID for a client.
 * @cmd:	An ioctl(2) command for ALSA sequencer operation.
 * @arg:	A pointer to data in kernel space.
 *
 * Despite its name, both kernel and application clients can be handled by
 * this kernel API. The 'arg' pointer should be in kernel space.
 *
 * Return: 0 at success. Negative error code at failure.
 */
int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg)
{
	const struct ioctl_handler *handler;
	struct snd_seq_client *client;

	client = clientptr(clientid);
	if (client == NULL)
		return -ENXIO;

	for (handler = ioctl_handlers; handler->cmd > 0; ++handler) {
		if (handler->cmd == cmd)
			return handler->func(client, arg);
	}

	pr_debug("ALSA: seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n",
		 cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
	return -ENOTTY;
}
EXPORT_SYMBOL(snd_seq_kernel_client_ctl);
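/*
 * Illustrative sketch (not part of this file): creating a port for a
 * kernel client through the handler table above.  The port name and
 * capability/type flags are assumptions for the example only.
 *
 *	struct snd_seq_port_info pinfo;
 *	int err;
 *
 *	memset(&pinfo, 0, sizeof(pinfo));
 *	pinfo.addr.client = my_client;
 *	strcpy(pinfo.name, "My Port");
 *	pinfo.capability = SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ;
 *	pinfo.type = SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC;
 *	err = snd_seq_kernel_client_ctl(my_client, SNDRV_SEQ_IOCTL_CREATE_PORT, &pinfo);
 */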
/* exported (for OSS emulator) */
int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table *wait)
{
	struct snd_seq_client *client;

	client = clientptr(clientid);
	if (client == NULL)
		return -ENXIO;

	if (! snd_seq_write_pool_allocated(client))
		return 1;
	if (snd_seq_pool_poll_wait(client->pool, file, wait))
		return 1;
	return 0;
}
EXPORT_SYMBOL(snd_seq_kernel_client_write_poll);
/*---------------------------------------------------------------------------*/

#ifdef CONFIG_SND_PROC_FS
static void snd_seq_info_dump_subscribers(struct snd_info_buffer *buffer,
					  struct snd_seq_port_subs_info *group,
					  int is_src, char *msg)
{
	struct list_head *p;
	struct snd_seq_subscribers *s;
	int count = 0;

	down_read(&group->list_mutex);
	if (list_empty(&group->list_head)) {
		up_read(&group->list_mutex);
		return;
	}
	snd_iprintf(buffer, msg);
	list_for_each(p, &group->list_head) {
		if (is_src)
			s = list_entry(p, struct snd_seq_subscribers, src_list);
		else
			s = list_entry(p, struct snd_seq_subscribers, dest_list);
		if (count++)
			snd_iprintf(buffer, ", ");
		snd_iprintf(buffer, "%d:%d",
			    is_src ? s->info.dest.client : s->info.sender.client,
			    is_src ? s->info.dest.port : s->info.sender.port);
		if (s->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
			snd_iprintf(buffer, "[%c:%d]", ((s->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL) ? 'r' : 't'), s->info.queue);
		if (group->exclusive)
			snd_iprintf(buffer, "[ex]");
	}
	up_read(&group->list_mutex);
	snd_iprintf(buffer, "\n");
}
#define FLAG_PERM_RD(perm) ((perm) & SNDRV_SEQ_PORT_CAP_READ ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_READ ? 'R' : 'r') : '-')
#define FLAG_PERM_WR(perm) ((perm) & SNDRV_SEQ_PORT_CAP_WRITE ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_WRITE ? 'W' : 'w') : '-')
#define FLAG_PERM_EX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_NO_EXPORT ? '-' : 'e')

#define FLAG_PERM_DUPLEX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_DUPLEX ? 'X' : '-')

static void snd_seq_info_dump_ports(struct snd_info_buffer *buffer,
				    struct snd_seq_client *client)
{
	struct snd_seq_client_port *p;

	mutex_lock(&client->ports_mutex);
	list_for_each_entry(p, &client->ports_list_head, list) {
		snd_iprintf(buffer, "  Port %3d : \"%s\" (%c%c%c%c)\n",
			    p->addr.port, p->name,
			    FLAG_PERM_RD(p->capability),
			    FLAG_PERM_WR(p->capability),
			    FLAG_PERM_EX(p->capability),
			    FLAG_PERM_DUPLEX(p->capability));
		snd_seq_info_dump_subscribers(buffer, &p->c_src, 1, "    Connecting To: ");
		snd_seq_info_dump_subscribers(buffer, &p->c_dest, 0, "    Connected From: ");
	}
	mutex_unlock(&client->ports_mutex);
}
/* exported to seq_info.c */
void snd_seq_info_clients_read(struct snd_info_entry *entry,
			       struct snd_info_buffer *buffer)
{
	int c;
	struct snd_seq_client *client;

	snd_iprintf(buffer, "Client info\n");
	snd_iprintf(buffer, "  cur  clients : %d\n", client_usage.cur);
	snd_iprintf(buffer, "  peak clients : %d\n", client_usage.peak);
	snd_iprintf(buffer, "  max  clients : %d\n", SNDRV_SEQ_MAX_CLIENTS);
	snd_iprintf(buffer, "\n");

	/* list the client table */
	for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) {
		client = snd_seq_client_use_ptr(c);
		if (client == NULL)
			continue;
		if (client->type == NO_CLIENT) {
			snd_seq_client_unlock(client);
			continue;
		}

		snd_iprintf(buffer, "Client %3d : \"%s\" [%s]\n",
			    c, client->name,
			    client->type == USER_CLIENT ? "User" : "Kernel");
		snd_seq_info_dump_ports(buffer, client);
		if (snd_seq_write_pool_allocated(client)) {
			snd_iprintf(buffer, "  Output pool :\n");
			snd_seq_info_pool(buffer, client->pool, "    ");
		}
		if (client->type == USER_CLIENT && client->data.user.fifo &&
		    client->data.user.fifo->pool) {
			snd_iprintf(buffer, "  Input pool :\n");
			snd_seq_info_pool(buffer, client->data.user.fifo->pool, "    ");
		}
		snd_seq_client_unlock(client);
	}
}
#endif /* CONFIG_SND_PROC_FS */
/*---------------------------------------------------------------------------*/

static const struct file_operations snd_seq_f_ops =
{
	.owner =	THIS_MODULE,
	.read =		snd_seq_read,
	.write =	snd_seq_write,
	.open =		snd_seq_open,
	.release =	snd_seq_release,
	.llseek =	no_llseek,
	.poll =		snd_seq_poll,
	.unlocked_ioctl =	snd_seq_ioctl,
	.compat_ioctl =	snd_seq_ioctl_compat,
};

static struct device seq_dev;

/*
 * register sequencer device
 */
int __init snd_sequencer_device_init(void)
{
	int err;

	snd_device_initialize(&seq_dev, NULL);
	dev_set_name(&seq_dev, "seq");

	if (mutex_lock_interruptible(&register_mutex))
		return -ERESTARTSYS;

	err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0,
				  &snd_seq_f_ops, NULL, &seq_dev);
	if (err < 0) {
		mutex_unlock(&register_mutex);
		put_device(&seq_dev);
		return err;
	}

	mutex_unlock(&register_mutex);

	return 0;
}

/*
 * unregister sequencer device
 */
void __exit snd_sequencer_device_done(void)
{
	snd_unregister_device(&seq_dev);
	put_device(&seq_dev);
}