/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 * Licensed under the GPL
 */

#include <linux/slab.h>
#include <linux/tty_flip.h>

#ifdef CONFIG_NOCONFIG_CHAN
static void *not_configged_init(char *str, int device,
				const struct chan_opts *opts)
	printk(KERN_ERR "Using a channel type which is configured out of "

static int not_configged_open(int input, int output, int primary, void *data,
	printk(KERN_ERR "Using a channel type which is configured out of "

static void not_configged_close(int fd, void *data)
	printk(KERN_ERR "Using a channel type which is configured out of "

static int not_configged_read(int fd, char *c_out, void *data)
	printk(KERN_ERR "Using a channel type which is configured out of "

static int not_configged_write(int fd, const char *buf, int len, void *data)
	printk(KERN_ERR "Using a channel type which is configured out of "

static int not_configged_console_write(int fd, const char *buf, int len)
	printk(KERN_ERR "Using a channel type which is configured out of "

static int not_configged_window_size(int fd, void *data, unsigned short *rows,
	printk(KERN_ERR "Using a channel type which is configured out of "

static void not_configged_free(void *data)
	printk(KERN_ERR "Using a channel type which is configured out of "
static const struct chan_ops not_configged_ops = {
	.init		= not_configged_init,
	.open		= not_configged_open,
	.close		= not_configged_close,
	.read		= not_configged_read,
	.write		= not_configged_write,
	.console_write	= not_configged_console_write,
	.window_size	= not_configged_window_size,
	.free		= not_configged_free,
};
#endif /* CONFIG_NOCONFIG_CHAN */
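/*
 * Feed one received character to the tty layer, honouring XON/XOFF
 * flow control (STOP_CHAR/START_CHAR) when the tty is not in raw mode.
 */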
static void tty_receive_char(struct tty_struct *tty, char ch)
	if (I_IXON(tty) && !I_IXOFF(tty) && !tty->raw) {
		if (ch == STOP_CHAR(tty)) {
		else if (ch == START_CHAR(tty)) {
	tty_insert_flip_char(tty, ch, TTY_NORMAL);
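/*
 * Open a single channel through its backend's ->open hook and switch
 * the resulting descriptor to non-blocking mode; if that fails, the
 * descriptor is closed again through ->close.
 */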
static int open_one_chan(struct chan *chan)
	if (chan->ops->open == NULL)
	else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary,
				     chan->data, &chan->dev);
	err = os_set_fd_block(fd, 0);
		(*chan->ops->close)(fd, chan->data);
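/* Open every channel on the list via open_one_chan(). */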
int open_chan(struct list_head *chans)
	struct list_head *ele;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
		ret = open_one_chan(chan);
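/*
 * Register window-size (SIGWINCH) handling for the primary output
 * channel whose backend provides a winch hook.
 */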
void chan_enable_winch(struct list_head *chans, struct tty_struct *tty)
	struct list_head *ele;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
		if (chan->primary && chan->output && chan->ops->winch) {
			register_winch(chan->fd, tty);
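/*
 * Open each channel of a line and hook its descriptor up to the line's
 * interrupt handling via line_setup_irq(); on failure the channel list
 * is closed again.
 */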
int enable_chan(struct line *line)
	struct list_head *ele;

	list_for_each(ele, &line->chan_list) {
		chan = list_entry(ele, struct chan, list);
		err = open_one_chan(chan);
		err = line_setup_irq(chan->fd, chan->input, chan->output, line,
	close_chan(&line->chan_list, 0);
/* Items are added in IRQ context, when free_irq can't be called, and
 * removed in process context, when it can.
 * This handles interrupt sources which disappear, and which need to
 * be permanently disabled. This is discovered in IRQ context, but
 * the freeing of the IRQ must be done later.
 */

static DEFINE_SPINLOCK(irqs_to_free_lock);
static LIST_HEAD(irqs_to_free);
	struct list_head *ele;

	spin_lock_irqsave(&irqs_to_free_lock, flags);
	list_splice_init(&irqs_to_free, &list);
	spin_unlock_irqrestore(&irqs_to_free_lock, flags);

	list_for_each(ele, &list) {
		chan = list_entry(ele, struct chan, free_list);
			free_irq(chan->line->driver->read_irq, chan);
			free_irq(chan->line->driver->write_irq, chan);
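/*
 * Close a single channel.  With delay_free_irq set (i.e. when called
 * from IRQ context), the channel is queued on irqs_to_free so its IRQs
 * can be released later from process context; otherwise the read and
 * write IRQs are freed immediately before calling the backend's ->close.
 */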
static void close_one_chan(struct chan *chan, int delay_free_irq)
	if (delay_free_irq) {
		spin_lock_irqsave(&irqs_to_free_lock, flags);
		list_add(&chan->free_list, &irqs_to_free);
		spin_unlock_irqrestore(&irqs_to_free_lock, flags);
			free_irq(chan->line->driver->read_irq, chan);
			free_irq(chan->line->driver->write_irq, chan);
	if (chan->ops->close != NULL)
		(*chan->ops->close)(chan->fd, chan->data);
void close_chan(struct list_head *chans, int delay_free_irq)
	/* Close in reverse order as open in case more than one of them
	 * refers to the same device and they save and restore that device's
	 * state. Then, the first one opened will have the original state,
	 * so it must be the last closed.
	 */
	list_for_each_entry_reverse(chan, chans, list) {
		close_one_chan(chan, delay_free_irq);
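/* Stop watching the input fd of every enabled channel for the given IRQ. */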
void deactivate_chan(struct list_head *chans, int irq)
	struct list_head *ele;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
		if (chan->enabled && chan->input)
			deactivate_fd(chan->fd, irq);
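/* Resume watching the input fd of every enabled channel for the given IRQ. */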
void reactivate_chan(struct list_head *chans, int irq)
	struct list_head *ele;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
		if (chan->enabled && chan->input)
			reactivate_fd(chan->fd, irq);
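/*
 * Write the buffer to every output channel.  A write that returns
 * -EAGAIN or completes only partially re-arms the write IRQ so the
 * remainder can be sent once the descriptor becomes writable again.
 */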
int write_chan(struct list_head *chans, const char *buf, int len,
	struct list_head *ele;
	struct chan *chan = NULL;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
		if (!chan->output || (chan->ops->write == NULL))
		n = chan->ops->write(chan->fd, buf, len, chan->data);
			if ((ret == -EAGAIN) || ((ret >= 0) && (ret < len)))
				reactivate_fd(chan->fd, write_irq);
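/* Write console output to every output channel via ->console_write. */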
int console_write_chan(struct list_head *chans, const char *buf, int len)
	struct list_head *ele;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
		if (!chan->output || (chan->ops->console_write == NULL))
		n = chan->ops->console_write(chan->fd, buf, len);
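/* Open a line's channels for use as a console and announce it. */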
int console_open_chan(struct line *line, struct console *co)
	err = open_chan(&line->chan_list);
	printk(KERN_INFO "Console initialized on /dev/%s%d\n", co->name,
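/*
 * Report the terminal window size through the channel's ->window_size
 * hook, if the backend provides one.
 */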
int chan_window_size(struct list_head *chans, unsigned short *rows_out,
		     unsigned short *cols_out)
	struct list_head *ele;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
			if (chan->ops->window_size == NULL)
			return chan->ops->window_size(chan->fd, chan->data,
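/*
 * Tear down a single channel: unlink it from the list, close it, let
 * the backend release its data via ->free, and stop SIGIO notification
 * on the primary output descriptor.
 */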
static void free_one_chan(struct chan *chan, int delay_free_irq)
	list_del(&chan->list);

	close_one_chan(chan, delay_free_irq);

	if (chan->ops->free != NULL)
		(*chan->ops->free)(chan->data);

	if (chan->primary && chan->output)
		ignore_sigio_fd(chan->fd);
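/* Free every channel on the list. */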
static void free_chan(struct list_head *chans, int delay_free_irq)
	struct list_head *ele, *next;

	list_for_each_safe(ele, next, chans) {
		chan = list_entry(ele, struct chan, list);
		free_one_chan(chan, delay_free_irq);
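/*
 * Emit a single channel's configuration ("none" if absent, otherwise
 * the backend type, optionally followed by ":<device>") into str using
 * CONFIG_CHUNK().
 */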
static int one_chan_config_string(struct chan *chan, char *str, int size,
		CONFIG_CHUNK(str, size, n, "none", 1);

	CONFIG_CHUNK(str, size, n, chan->ops->type, 0);

	if (chan->dev == NULL) {
		CONFIG_CHUNK(str, size, n, "", 1);

	CONFIG_CHUNK(str, size, n, ":", 0);
	CONFIG_CHUNK(str, size, n, chan->dev, 0);
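/* Emit the configuration of an input/output channel pair as "in,out". */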
static int chan_pair_config_string(struct chan *in, struct chan *out,
				   char *str, int size, char **error_out)
	n = one_chan_config_string(in, str, size, error_out);
		CONFIG_CHUNK(str, size, n, "", 1);

	CONFIG_CHUNK(str, size, n, ",", 1);
	n = one_chan_config_string(out, str, size, error_out);
	CONFIG_CHUNK(str, size, n, "", 1);
int chan_config_string(struct list_head *chans, char *str, int size,
	struct list_head *ele;
	struct chan *chan, *in = NULL, *out = NULL;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);

	return chan_pair_config_string(in, out, str, size, error_out);
	const struct chan_ops *ops;

static const struct chan_type chan_table[] = {
#ifdef CONFIG_NULL_CHAN
	{ "null", &null_ops },
	{ "null", &not_configged_ops },

#ifdef CONFIG_PORT_CHAN
	{ "port", &port_ops },
	{ "port", &not_configged_ops },

#ifdef CONFIG_PTY_CHAN
	{ "pty", &not_configged_ops },
	{ "pts", &not_configged_ops },

#ifdef CONFIG_TTY_CHAN
	{ "tty", &not_configged_ops },

#ifdef CONFIG_XTERM_CHAN
	{ "xterm", &xterm_ops },
	{ "xterm", &not_configged_ops },
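/*
 * Parse one channel specification: match its key against chan_table,
 * let the backend's ->init parse the remainder of the string, and
 * allocate and initialize the struct chan.
 */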
static struct chan *parse_chan(struct line *line, char *str, int device,
			       const struct chan_opts *opts, char **error_out)
	const struct chan_type *entry;
	const struct chan_ops *ops;

	for(i = 0; i < ARRAY_SIZE(chan_table); i++) {
		entry = &chan_table[i];
		if (!strncmp(str, entry->key, strlen(entry->key))) {
			str += strlen(entry->key);

		*error_out = "No match for configured backends";

	data = (*ops->init)(str, device, opts);
		*error_out = "Configuration failed";

	chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
		*error_out = "Memory allocation failed";

	*chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list),
				 LIST_HEAD_INIT(chan->free_list),
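/*
 * Parse a description of the form "in,out" (or a single specification
 * used for both directions) into channels on the line's channel list,
 * resetting any existing channel list first.
 */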
int parse_chan_pair(char *str, struct line *line, int device,
		    const struct chan_opts *opts, char **error_out)
	struct list_head *chans = &line->chan_list;
	struct chan *new, *chan;

	if (!list_empty(chans)) {
		chan = list_entry(chans->next, struct chan, list);
		INIT_LIST_HEAD(chans);

	out = strchr(str, ',');
		new = parse_chan(line, in, device, opts, error_out);
		list_add(&new->list, chans);

		new = parse_chan(line, out, device, opts, error_out);
		list_add(&new->list, chans);

		new = parse_chan(line, str, device, opts, error_out);
		list_add(&new->list, chans);
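/* Pick out the primary output channel's descriptor. */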
int chan_out_fd(struct list_head *chans)
	struct list_head *ele;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
		if (chan->primary && chan->output)
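/*
 * Interrupt handler for a line's input channels: read available bytes
 * into the tty flip buffer, rescheduling the work item when the buffer
 * is full, re-arming the descriptor afterwards, and closing channels
 * whose backend has gone away.
 */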
void chan_interrupt(struct list_head *chans, struct delayed_work *task,
		    struct tty_struct *tty, int irq)
	struct list_head *ele, *next;

	list_for_each_safe(ele, next, chans) {
		chan = list_entry(ele, struct chan, list);
		if (!chan->input || (chan->ops->read == NULL))
			if (tty && !tty_buffer_request_room(tty, 1)) {
				schedule_delayed_work(task, 1);
			err = chan->ops->read(chan->fd, &c, chan->data);
				tty_receive_char(tty, c);
			reactivate_fd(chan->fd, irq);
				close_chan(chans, 1);
			else close_one_chan(chan, 1);
		tty_flip_buffer_push(tty);