/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/tty_flip.h>
#include "chan_kern.h"

#ifdef CONFIG_NOCONFIG_CHAN
static void *not_configged_init(char *str, int device,
				const struct chan_opts *opts)
{
	printk("Using a channel type which is configured out of "
	       "UML\n");
	return NULL;
}

static int not_configged_open(int input, int output, int primary, void *data,
			      char **dev_out)
{
	printk("Using a channel type which is configured out of "
	       "UML\n");
	return -ENODEV;
}

static void not_configged_close(int fd, void *data)
{
	printk("Using a channel type which is configured out of "
	       "UML\n");
}

static int not_configged_read(int fd, char *c_out, void *data)
{
	printk("Using a channel type which is configured out of "
	       "UML\n");
	return -EIO;
}

static int not_configged_write(int fd, const char *buf, int len, void *data)
{
	printk("Using a channel type which is configured out of "
	       "UML\n");
	return -EIO;
}

static int not_configged_console_write(int fd, const char *buf, int len)
{
	printk("Using a channel type which is configured out of "
	       "UML\n");
	return -EIO;
}

static int not_configged_window_size(int fd, void *data, unsigned short *rows,
				     unsigned short *cols)
{
	printk("Using a channel type which is configured out of "
	       "UML\n");
	return -ENODEV;
}

static void not_configged_free(void *data)
{
	printk("Using a channel type which is configured out of "
	       "UML\n");
}

static const struct chan_ops not_configged_ops = {
	.init		= not_configged_init,
	.open		= not_configged_open,
	.close		= not_configged_close,
	.read		= not_configged_read,
	.write		= not_configged_write,
	.console_write	= not_configged_console_write,
	.window_size	= not_configged_window_size,
	.free		= not_configged_free,
};

#endif /* CONFIG_NOCONFIG_CHAN */

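/*
 * Generic implementations of the chan_ops callbacks, shared by the
 * channel backends.  They are thin wrappers around the os_* helpers
 * that operate on host file descriptors.
 */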
void generic_close(int fd, void *unused)
{
	os_close_file(fd);
}

int generic_read(int fd, char *c_out, void *unused)
{
	int n;

	n = os_read_file(fd, c_out, sizeof(*c_out));
	if(n == -EAGAIN)
		return 0;
	else if(n == 0)
		return -EIO;
	return n;
}

/* XXX Trivial wrapper around os_write_file */

int generic_write(int fd, const char *buf, int n, void *unused)
{
	return os_write_file(fd, buf, n);
}

int generic_window_size(int fd, void *unused, unsigned short *rows_out,
			unsigned short *cols_out)
{
	int rows, cols;
	int ret;

	ret = os_window_size(fd, &rows, &cols);
	if(ret < 0)
		return ret;

	ret = ((*rows_out != rows) || (*cols_out != cols));

	*rows_out = rows;
	*cols_out = cols;

	return ret;
}

void generic_free(void *data)
{
	kfree(data);
}

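/*
 * Hand one received character to the tty layer, honouring ^S/^Q
 * software flow control on cooked ttys with IXON set.
 */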
static void tty_receive_char(struct tty_struct *tty, char ch)
{
	if(tty == NULL) return;

	if(I_IXON(tty) && !I_IXOFF(tty) && !tty->raw) {
		if(ch == STOP_CHAR(tty)){
			stop_tty(tty);
			return;
		}
		else if(ch == START_CHAR(tty)){
			start_tty(tty);
			return;
		}
	}

	tty_insert_flip_char(tty, ch, TTY_NORMAL);
}

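/*
 * Open a single channel through its backend's ->open() callback and
 * remember the resulting host file descriptor in chan->fd.
 */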
static int open_one_chan(struct chan *chan)
{
	int fd;

	if(chan->ops->open == NULL)
		fd = 0;
	else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary,
				     chan->data, &chan->dev);
	if(fd < 0)
		return fd;

	chan->fd = fd;
	return 0;
}

int open_chan(struct list_head *chans)
{
	struct list_head *ele;
	struct chan *chan;
	int ret, err = 0;

	list_for_each(ele, chans){
		chan = list_entry(ele, struct chan, list);
		ret = open_one_chan(chan);
		if(chan->primary)
			err = ret;
	}
	return err;
}

void chan_enable_winch(struct list_head *chans, struct tty_struct *tty)
{
	struct list_head *ele;
	struct chan *chan;

	list_for_each(ele, chans){
		chan = list_entry(ele, struct chan, list);
		if(chan->primary && chan->output && chan->ops->winch){
			register_winch(chan->fd, tty);
			return;
		}
	}
}

void enable_chan(struct line *line)
{
	struct list_head *ele;
	struct chan *chan;

	list_for_each(ele, &line->chan_list){
		chan = list_entry(ele, struct chan, list);
		if(open_one_chan(chan))
			continue;

		if(chan->enabled)
			continue;
		line_setup_irq(chan->fd, chan->input, chan->output, line,
			       chan);
		chan->enabled = 1;
	}
}

/* Items are added in IRQ context, when free_irq can't be called, and
 * removed in process context, when it can.
 * This handles interrupt sources which disappear, and which need to
 * be permanently disabled.  This is discovered in IRQ context, but
 * the freeing of the IRQ must be done later.
 */
static DEFINE_SPINLOCK(irqs_to_free_lock);
static LIST_HEAD(irqs_to_free);

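/*
 * Runs in process context and releases the IRQs of any channels that
 * were queued on irqs_to_free from IRQ context.
 */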
void free_irqs(void)
{
	struct chan *chan;
	LIST_HEAD(list);
	struct list_head *ele;
	unsigned long flags;

	spin_lock_irqsave(&irqs_to_free_lock, flags);
	list_splice_init(&irqs_to_free, &list);
	spin_unlock_irqrestore(&irqs_to_free_lock, flags);

	list_for_each(ele, &list){
		chan = list_entry(ele, struct chan, free_list);

		if(chan->input)
			free_irq(chan->line->driver->read_irq, chan);
		if(chan->output)
			free_irq(chan->line->driver->write_irq, chan);
	}
}

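/*
 * Close a single channel.  If delay_free_irq is set we are in IRQ
 * context, so the channel is queued on irqs_to_free and its IRQs are
 * released later by free_irqs(); otherwise they are freed here.
 */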
static void close_one_chan(struct chan *chan, int delay_free_irq)
{
	unsigned long flags;

	if(delay_free_irq){
		spin_lock_irqsave(&irqs_to_free_lock, flags);
		list_add(&chan->free_list, &irqs_to_free);
		spin_unlock_irqrestore(&irqs_to_free_lock, flags);
	}
	else {
		if(chan->input)
			free_irq(chan->line->driver->read_irq, chan);
		if(chan->output)
			free_irq(chan->line->driver->write_irq, chan);
	}

	if(chan->ops->close != NULL)
		(*chan->ops->close)(chan->fd, chan->data);
}

void close_chan(struct list_head *chans, int delay_free_irq)
{
	struct chan *chan;

	/* Close in reverse order as open in case more than one of them
	 * refers to the same device and they save and restore that device's
	 * state.  Then, the first one opened will have the original state,
	 * so it must be the last closed.
	 */
	list_for_each_entry_reverse(chan, chans, list) {
		close_one_chan(chan, delay_free_irq);
	}
}

void deactivate_chan(struct list_head *chans, int irq)
{
	struct list_head *ele;
	struct chan *chan;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);

		if(chan->enabled && chan->input)
			deactivate_fd(chan->fd, irq);
	}
}

void reactivate_chan(struct list_head *chans, int irq)
{
	struct list_head *ele;
	struct chan *chan;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);

		if(chan->enabled && chan->input)
			reactivate_fd(chan->fd, irq);
	}
}

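/*
 * Write a buffer to every output channel.  The return value is the
 * primary channel's result; an -EAGAIN or short write on the primary
 * re-arms write_irq via reactivate_fd() so the rest can be sent later.
 */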
int write_chan(struct list_head *chans, const char *buf, int len,
	       int write_irq)
{
	struct list_head *ele;
	struct chan *chan = NULL;
	int n, ret = 0;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
		if (!chan->output || (chan->ops->write == NULL))
			continue;

		n = chan->ops->write(chan->fd, buf, len, chan->data);
		if (chan->primary) {
			ret = n;
			if ((ret == -EAGAIN) || ((ret >= 0) && (ret < len)))
				reactivate_fd(chan->fd, write_irq);
		}
	}
	return ret;
}

int console_write_chan(struct list_head *chans, const char *buf, int len)
{
	struct list_head *ele;
	struct chan *chan;
	int n, ret = 0;

	list_for_each(ele, chans){
		chan = list_entry(ele, struct chan, list);
		if(!chan->output || (chan->ops->console_write == NULL))
			continue;

		n = chan->ops->console_write(chan->fd, buf, len);
		if(chan->primary) ret = n;
	}
	return ret;
}

int console_open_chan(struct line *line, struct console *co)
{
	int err;

	err = open_chan(&line->chan_list);
	if(err)
		return err;

	printk("Console initialized on /dev/%s%d\n", co->name, co->index);
	return 0;
}

int chan_window_size(struct list_head *chans, unsigned short *rows_out,
		     unsigned short *cols_out)
{
	struct list_head *ele;
	struct chan *chan;

	list_for_each(ele, chans){
		chan = list_entry(ele, struct chan, list);
		if(chan->primary){
			if(chan->ops->window_size == NULL)
				return 0;
			return chan->ops->window_size(chan->fd, chan->data,
						      rows_out, cols_out);
		}
	}
	return 0;
}

static void free_one_chan(struct chan *chan, int delay_free_irq)
{
	list_del(&chan->list);

	close_one_chan(chan, delay_free_irq);

	if(chan->ops->free != NULL)
		(*chan->ops->free)(chan->data);

	if(chan->primary && chan->output) ignore_sigio_fd(chan->fd);
	kfree(chan);
}

static void free_chan(struct list_head *chans, int delay_free_irq)
{
	struct list_head *ele, *next;
	struct chan *chan;

	list_for_each_safe(ele, next, chans){
		chan = list_entry(ele, struct chan, list);
		free_one_chan(chan, delay_free_irq);
	}
}

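/*
 * Build the "type:device" description of a single channel into str,
 * or "none" if the channel is not configured.
 */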
static int one_chan_config_string(struct chan *chan, char *str, int size,
				  char **error_out)
{
	int n = 0;

	if(chan == NULL){
		CONFIG_CHUNK(str, size, n, "none", 1);
		return n;
	}

	CONFIG_CHUNK(str, size, n, chan->ops->type, 0);

	if(chan->dev == NULL){
		CONFIG_CHUNK(str, size, n, "", 1);
		return n;
	}

	CONFIG_CHUNK(str, size, n, ":", 0);
	CONFIG_CHUNK(str, size, n, chan->dev, 0);

	return n;
}

static int chan_pair_config_string(struct chan *in, struct chan *out,
				   char *str, int size, char **error_out)
{
	int n;

	n = one_chan_config_string(in, str, size, error_out);
	str += n;
	size -= n;

	if(in == out){
		CONFIG_CHUNK(str, size, n, "", 1);
		return n;
	}

	CONFIG_CHUNK(str, size, n, ",", 1);
	n = one_chan_config_string(out, str, size, error_out);
	str += n;
	size -= n;
	CONFIG_CHUNK(str, size, n, "", 1);

	return n;
}

int chan_config_string(struct list_head *chans, char *str, int size,
		       char **error_out)
{
	struct list_head *ele;
	struct chan *chan, *in = NULL, *out = NULL;

	list_for_each(ele, chans){
		chan = list_entry(ele, struct chan, list);
		if(!chan->primary)
			continue;
		if(chan->input)
			in = chan;
		if(chan->output)
			out = chan;
	}

	return chan_pair_config_string(in, out, str, size, error_out);
}

struct chan_type {
	char *key;
	const struct chan_ops *ops;
};

static const struct chan_type chan_table[] = {
#ifdef CONFIG_NULL_CHAN
	{ "null", &null_ops },
#else
	{ "null", &not_configged_ops },
#endif

#ifdef CONFIG_PORT_CHAN
	{ "port", &port_ops },
#else
	{ "port", &not_configged_ops },
#endif

#ifdef CONFIG_PTY_CHAN
	{ "pty", &pty_ops },
	{ "pts", &pts_ops },
#else
	{ "pty", &not_configged_ops },
	{ "pts", &not_configged_ops },
#endif

#ifdef CONFIG_TTY_CHAN
	{ "tty", &tty_ops },
#else
	{ "tty", &not_configged_ops },
#endif

#ifdef CONFIG_XTERM_CHAN
	{ "xterm", &xterm_ops },
#else
	{ "xterm", &not_configged_ops },
#endif
};

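/*
 * Parse a single channel description: match the backend name at the
 * start of str against chan_table and let that backend's init routine
 * consume the rest of the string.
 */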
static struct chan *parse_chan(struct line *line, char *str, int device,
			       const struct chan_opts *opts, char **error_out)
{
	const struct chan_type *entry;
	const struct chan_ops *ops;
	struct chan *chan;
	void *data;
	int i;

	ops = NULL;
	for(i = 0; i < ARRAY_SIZE(chan_table); i++){
		entry = &chan_table[i];
		if(!strncmp(str, entry->key, strlen(entry->key))){
			ops = entry->ops;
			str += strlen(entry->key);
			break;
		}
	}
	if(ops == NULL){
		*error_out = "No match for configured backends";
		return NULL;
	}

	data = (*ops->init)(str, device, opts);
	if(data == NULL){
		*error_out = "Configuration failed";
		return NULL;
	}

	chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
	if(chan == NULL){
		*error_out = "Memory allocation failed";
		return NULL;
	}
	*chan = ((struct chan) { .list		= LIST_HEAD_INIT(chan->list),
				 .free_list	=
					LIST_HEAD_INIT(chan->free_list),
				 .line		= line,
				 .primary	= 1,
				 .input		= 0,
				 .output	= 0,
				 .enabled	= 0,
				 .fd		= -1,
				 .ops		= ops,
				 .data		= data });
	return chan;
}

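/*
 * Parse a whole channel configuration for a line.  "in,out" names a
 * separate input and output channel; a single name is used for both
 * directions.  Any existing channels on the line are freed first.
 */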
int parse_chan_pair(char *str, struct line *line, int device,
		    const struct chan_opts *opts, char **error_out)
{
	struct list_head *chans = &line->chan_list;
	struct chan *new, *chan;
	char *in, *out;

	if(!list_empty(chans)){
		chan = list_entry(chans->next, struct chan, list);
		free_chan(chans, 0);
		INIT_LIST_HEAD(chans);
	}

	out = strchr(str, ',');
	if(out != NULL){
		in = str;
		*out = '\0';
		out++;
		new = parse_chan(line, in, device, opts, error_out);
		if(new == NULL)
			return -1;

		new->input = 1;
		list_add(&new->list, chans);

		new = parse_chan(line, out, device, opts, error_out);
		if(new == NULL)
			return -1;

		list_add(&new->list, chans);
		new->output = 1;
	}
	else {
		new = parse_chan(line, str, device, opts, error_out);
		if(new == NULL)
			return -1;

		list_add(&new->list, chans);
		new->input = 1;
		new->output = 1;
	}
	return 0;
}

int chan_out_fd(struct list_head *chans)
{
	struct list_head *ele;
	struct chan *chan;

	list_for_each(ele, chans){
		chan = list_entry(ele, struct chan, list);
		if(chan->primary && chan->output)
			return chan->fd;
	}
	return -1;
}

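/*
 * Read IRQ handler core: drain each input channel into the tty flip
 * buffer, rescheduling ourselves as delayed work if the tty has no
 * room, and closing the channels and hanging up the tty when the
 * primary channel returns -EIO.
 */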
void chan_interrupt(struct list_head *chans, struct delayed_work *task,
		    struct tty_struct *tty, int irq)
{
	struct list_head *ele, *next;
	struct chan *chan;
	int err;
	char c;

	list_for_each_safe(ele, next, chans){
		chan = list_entry(ele, struct chan, list);
		if(!chan->input || (chan->ops->read == NULL)) continue;
		do {
			if (tty && !tty_buffer_request_room(tty, 1)) {
				schedule_delayed_work(task, 1);
				goto out;
			}
			err = chan->ops->read(chan->fd, &c, chan->data);
			if(err > 0)
				tty_receive_char(tty, c);
		} while(err > 0);

		if(err == 0) reactivate_fd(chan->fd, irq);
		if(err == -EIO){
			if(chan->primary){
				if(tty != NULL)
					tty_hangup(tty);
				close_chan(chans, 1);
				return;
			}
			else close_one_chan(chan, 1);
		}
	}
 out:
	if(tty) tty_flip_buffer_push(tty);
}