/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 * Licensed under the GPL
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include "chan.h"
#include "os.h"
#include "irq_kern.h"

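/*
 * Fallback ops used when a channel backend is configured out of the
 * build: every operation just logs an error and fails, so a config
 * string naming a missing backend degrades cleanly instead of leaving
 * a NULL ops pointer behind.
 */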
#ifdef CONFIG_NOCONFIG_CHAN
static void *not_configged_init(char *str, int device,
                                const struct chan_opts *opts)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
        return NULL;
}

static int not_configged_open(int input, int output, int primary, void *data,
                              char **dev_out)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
        return -ENODEV;
}

static void not_configged_close(int fd, void *data)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
}

static int not_configged_read(int fd, char *c_out, void *data)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
        return -EIO;
}

static int not_configged_write(int fd, const char *buf, int len, void *data)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
        return -EIO;
}

static int not_configged_console_write(int fd, const char *buf, int len)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
        return -EIO;
}

static int not_configged_window_size(int fd, void *data, unsigned short *rows,
                                     unsigned short *cols)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
        return -ENODEV;
}

static void not_configged_free(void *data)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
}

static const struct chan_ops not_configged_ops = {
        .init           = not_configged_init,
        .open           = not_configged_open,
        .close          = not_configged_close,
        .read           = not_configged_read,
        .write          = not_configged_write,
        .console_write  = not_configged_console_write,
        .window_size    = not_configged_window_size,
        .free           = not_configged_free,
};
#endif /* CONFIG_NOCONFIG_CHAN */

static void tty_receive_char(struct tty_struct *tty, char ch)
{
        if (tty == NULL)
                return;

        if (I_IXON(tty) && !I_IXOFF(tty) && !tty->raw) {
                if (ch == STOP_CHAR(tty)) {
                        stop_tty(tty);
                        return;
                }
                else if (ch == START_CHAR(tty)) {
                        start_tty(tty);
                        return;
                }
        }

        tty_insert_flip_char(tty, ch, TTY_NORMAL);
}

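/* Open a single channel through its backend's ->open op (if any) and
 * switch the resulting host fd to non-blocking mode. */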
static int open_one_chan(struct chan *chan)
{
        int fd, err;

        if (chan->opened)
                return 0;

        if (chan->ops->open == NULL)
                fd = 0;
        else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary,
                                     chan->data, &chan->dev);
        if (fd < 0)
                return fd;

        err = os_set_fd_block(fd, 0);
        if (err) {
                (*chan->ops->close)(fd, chan->data);
                return err;
        }

        chan->fd = fd;
        chan->opened = 1;
        return 0;
}

static int open_chan(struct list_head *chans)
{
        struct list_head *ele;
        struct chan *chan;
        int ret = -ENODEV;

        list_for_each(ele, chans) {
                chan = list_entry(ele, struct chan, list);
                ret = open_one_chan(chan);
                if (chan->primary)
                        return ret;
        }
        return 0;
}

void chan_enable_winch(struct chan *chan, struct tty_struct *tty)
{
        if (chan && chan->primary && chan->ops->winch)
                register_winch(chan->fd, tty);
}

static void line_timer_cb(struct work_struct *work)
{
        struct line *line = container_of(work, struct line, task.work);
        struct tty_struct *tty = tty_port_tty_get(&line->port);

        if (!line->throttled)
                chan_interrupt(line, tty, line->driver->read_irq);
        tty_kref_put(tty);
}

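/* Open every channel on a line and hook its fd into the IRQ layer;
 * failure on the primary channel tears the whole line back down. */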
int enable_chan(struct line *line)
{
        struct list_head *ele;
        struct chan *chan;
        int err;

        INIT_DELAYED_WORK(&line->task, line_timer_cb);

        list_for_each(ele, &line->chan_list) {
                chan = list_entry(ele, struct chan, list);
                err = open_one_chan(chan);
                if (err) {
                        if (chan->primary)
                                goto out_close;
                        continue;
                }

                if (chan->enabled)
                        continue;
                err = line_setup_irq(chan->fd, chan->input, chan->output, line,
                                     chan);
                if (err)
                        goto out_close;

                chan->enabled = 1;
        }

        return 0;

 out_close:
        close_chan(line);
        return err;
}

/* Items are added in IRQ context, when free_irq can't be called, and
 * removed in process context, when it can.
 * This handles interrupt sources which disappear, and which need to
 * be permanently disabled.  This is discovered in IRQ context, but
 * the freeing of the IRQ must be done later.
 */
static DEFINE_SPINLOCK(irqs_to_free_lock);
static LIST_HEAD(irqs_to_free);

void free_irqs(void)
{
        struct chan *chan;
        LIST_HEAD(list);
        struct list_head *ele;
        unsigned long flags;

        spin_lock_irqsave(&irqs_to_free_lock, flags);
        list_splice_init(&irqs_to_free, &list);
        spin_unlock_irqrestore(&irqs_to_free_lock, flags);

        list_for_each(ele, &list) {
                chan = list_entry(ele, struct chan, free_list);

                if (chan->input && chan->enabled)
                        um_free_irq(chan->line->driver->read_irq, chan);
                if (chan->output && chan->enabled)
                        um_free_irq(chan->line->driver->write_irq, chan);
                chan->enabled = 0;
        }
}

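/* Close one channel.  A non-zero delay_free_irq means we are in IRQ
 * context, so the um_free_irq() calls are deferred to free_irqs() via
 * the irqs_to_free list instead of being made here. */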
static void close_one_chan(struct chan *chan, int delay_free_irq)
{
        unsigned long flags;

        if (!chan->opened)
                return;

        if (delay_free_irq) {
                spin_lock_irqsave(&irqs_to_free_lock, flags);
                list_add(&chan->free_list, &irqs_to_free);
                spin_unlock_irqrestore(&irqs_to_free_lock, flags);
        }
        else {
                if (chan->input && chan->enabled)
                        um_free_irq(chan->line->driver->read_irq, chan);
                if (chan->output && chan->enabled)
                        um_free_irq(chan->line->driver->write_irq, chan);
                chan->enabled = 0;
        }
        if (chan->ops->close != NULL)
                (*chan->ops->close)(chan->fd, chan->data);

        chan->opened = 0;
        chan->fd = -1;
}

void close_chan(struct line *line)
{
        struct chan *chan;

        /* Close in reverse order as open in case more than one of them
         * refers to the same device and they save and restore that device's
         * state.  Then, the first one opened will have the original state,
         * so it must be the last closed.
         */
        list_for_each_entry_reverse(chan, &line->chan_list, list) {
                close_one_chan(chan, 0);
        }
}

void deactivate_chan(struct chan *chan, int irq)
{
        if (chan && chan->enabled)
                deactivate_fd(chan->fd, irq);
}

void reactivate_chan(struct chan *chan, int irq)
{
        if (chan && chan->enabled)
                reactivate_fd(chan->fd, irq);
}

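/* Write a buffer out through a channel; on the primary channel a short
 * or -EAGAIN write re-arms the write IRQ so the remainder is sent once
 * the fd becomes writable again. */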
int write_chan(struct chan *chan, const char *buf, int len,
               int write_irq)
{
        int n, ret = 0;

        if (len == 0 || !chan || !chan->ops->write)
                return 0;

        n = chan->ops->write(chan->fd, buf, len, chan->data);
        if (chan->primary) {
                ret = n;
                if ((ret == -EAGAIN) || ((ret >= 0) && (ret < len)))
                        reactivate_fd(chan->fd, write_irq);
        }
        return ret;
}

int console_write_chan(struct chan *chan, const char *buf, int len)
{
        int n, ret = 0;

        if (!chan || !chan->ops->console_write)
                return 0;

        n = chan->ops->console_write(chan->fd, buf, len);
        if (chan->primary)
                ret = n;
        return ret;
}

int console_open_chan(struct line *line, struct console *co)
{
        int err;

        err = open_chan(&line->chan_list);
        if (err)
                return err;

        printk(KERN_INFO "Console initialized on /dev/%s%d\n", co->name,
               co->index);
        return 0;
}

int chan_window_size(struct line *line, unsigned short *rows_out,
                     unsigned short *cols_out)
{
        struct chan *chan;

        chan = line->chan_in;
        if (chan && chan->primary) {
                if (chan->ops->window_size == NULL)
                        return 0;
                return chan->ops->window_size(chan->fd, chan->data,
                                              rows_out, cols_out);
        }
        chan = line->chan_out;
        if (chan && chan->primary) {
                if (chan->ops->window_size == NULL)
                        return 0;
                return chan->ops->window_size(chan->fd, chan->data,
                                              rows_out, cols_out);
        }
        return 0;
}

static void free_one_chan(struct chan *chan)
{
        list_del(&chan->list);

        close_one_chan(chan, 0);

        if (chan->ops->free != NULL)
                (*chan->ops->free)(chan->data);

        if (chan->primary && chan->output)
                ignore_sigio_fd(chan->fd);
        kfree(chan);
}

static void free_chan(struct list_head *chans)
{
        struct list_head *ele, *next;
        struct chan *chan;

        list_for_each_safe(ele, next, chans) {
                chan = list_entry(ele, struct chan, list);
                free_one_chan(chan);
        }
}

static int one_chan_config_string(struct chan *chan, char *str, int size,
                                  char **error_out)
{
        int n = 0;

        if (chan == NULL) {
                CONFIG_CHUNK(str, size, n, "none", 1);
                return n;
        }

        CONFIG_CHUNK(str, size, n, chan->ops->type, 0);

        if (chan->dev == NULL) {
                CONFIG_CHUNK(str, size, n, "", 1);
                return n;
        }

        CONFIG_CHUNK(str, size, n, ":", 0);
        CONFIG_CHUNK(str, size, n, chan->dev, 0);

        return n;
}

static int chan_pair_config_string(struct chan *in, struct chan *out,
                                   char *str, int size, char **error_out)
{
        int n;

        n = one_chan_config_string(in, str, size, error_out);
        str += n;
        size -= n;

        if (in == out) {
                CONFIG_CHUNK(str, size, n, "", 1);
                return n;
        }

        CONFIG_CHUNK(str, size, n, ",", 1);
        n = one_chan_config_string(out, str, size, error_out);
        str += n;
        size -= n;
        CONFIG_CHUNK(str, size, n, "", 1);

        return n;
}

int chan_config_string(struct line *line, char *str, int size,
                       char **error_out)
{
        struct chan *in = line->chan_in, *out = line->chan_out;

        if (in && !in->primary)
                in = NULL;
        if (out && !out->primary)
                out = NULL;

        return chan_pair_config_string(in, out, str, size, error_out);
}

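/* Table mapping config-string keywords ("null", "port", "pty", ...) to
 * the backend's chan_ops; a backend that is configured out falls back
 * to not_configged_ops. */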
struct chan_type {
        char *key;
        const struct chan_ops *ops;
};

static const struct chan_type chan_table[] = {
        { "fd", &fd_ops },

#ifdef CONFIG_NULL_CHAN
        { "null", &null_ops },
#else
        { "null", &not_configged_ops },
#endif

#ifdef CONFIG_PORT_CHAN
        { "port", &port_ops },
#else
        { "port", &not_configged_ops },
#endif

#ifdef CONFIG_PTY_CHAN
        { "pty", &pty_ops },
        { "pts", &pts_ops },
#else
        { "pty", &not_configged_ops },
        { "pts", &not_configged_ops },
#endif

#ifdef CONFIG_TTY_CHAN
        { "tty", &tty_ops },
#else
        { "tty", &not_configged_ops },
#endif

#ifdef CONFIG_XTERM_CHAN
        { "xterm", &xterm_ops },
#else
        { "xterm", &not_configged_ops },
#endif
};

static struct chan *parse_chan(struct line *line, char *str, int device,
                               const struct chan_opts *opts, char **error_out)
{
        const struct chan_type *entry;
        const struct chan_ops *ops;
        struct chan *chan;
        void *data;
        int i;

        ops = NULL;
        data = NULL;
        for(i = 0; i < ARRAY_SIZE(chan_table); i++) {
                entry = &chan_table[i];
                if (!strncmp(str, entry->key, strlen(entry->key))) {
                        ops = entry->ops;
                        str += strlen(entry->key);
                        break;
                }
        }
        if (ops == NULL) {
                *error_out = "No match for configured backends";
                return NULL;
        }

        data = (*ops->init)(str, device, opts);
        if (data == NULL) {
                *error_out = "Configuration failed";
                return NULL;
        }

        chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
        if (chan == NULL) {
                *error_out = "Memory allocation failed";
                return NULL;
        }
        *chan = ((struct chan) { .list          = LIST_HEAD_INIT(chan->list),
                                 .free_list     =
                                        LIST_HEAD_INIT(chan->free_list),
                                 .line          = line,
                                 .primary       = 1,
                                 .input         = 0,
                                 .output        = 0,
                                 .opened        = 0,
                                 .enabled       = 0,
                                 .fd            = -1,
                                 .ops           = ops,
                                 .data          = data });
        return chan;
}

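/* A line's channel description is either "chan" (one channel used for
 * both input and output) or "in,out"; build the corresponding struct
 * chan entries on line->chan_list. */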
int parse_chan_pair(char *str, struct line *line, int device,
                    const struct chan_opts *opts, char **error_out)
{
        struct list_head *chans = &line->chan_list;
        struct chan *new;
        char *in, *out;

        if (!list_empty(chans)) {
                line->chan_in = line->chan_out = NULL;
                free_chan(chans);
                INIT_LIST_HEAD(chans);
        }

        if (!str)
                return 0;

        out = strchr(str, ',');
        if (out != NULL) {
                in = str;
                *out = '\0';
                out++;
                new = parse_chan(line, in, device, opts, error_out);
                if (new == NULL)
                        return -1;

                new->input = 1;
                list_add(&new->list, chans);
                line->chan_in = new;

                new = parse_chan(line, out, device, opts, error_out);
                if (new == NULL)
                        return -1;

                list_add(&new->list, chans);
                new->output = 1;
                line->chan_out = new;
        }
        else {
                new = parse_chan(line, str, device, opts, error_out);
                if (new == NULL)
                        return -1;

                list_add(&new->list, chans);
                new->input = 1;
                new->output = 1;
                line->chan_in = line->chan_out = new;
        }
        return 0;
}

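/* Read-IRQ work: drain the input channel into the tty flip buffer,
 * rescheduling via line->task when the buffer is full, and hanging up
 * the tty when the host side of the channel goes away (-EIO). */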
void chan_interrupt(struct line *line, struct tty_struct *tty, int irq)
{
        struct chan *chan = line->chan_in;
        int err;
        char c;

        if (!chan || !chan->ops->read)
                goto out;

        do {
                if (tty && !tty_buffer_request_room(tty, 1)) {
                        schedule_delayed_work(&line->task, 1);
                        goto out;
                }
                err = chan->ops->read(chan->fd, &c, chan->data);
                if (err > 0)
                        tty_receive_char(tty, c);
        } while (err > 0);

        if (err == 0)
                reactivate_fd(chan->fd, irq);
        if (err == -EIO) {
                if (chan->primary) {
                        if (tty != NULL)
                                tty_hangup(tty);
                        if (line->chan_out != chan)
                                close_one_chan(line->chan_out, 1);
                }
                close_one_chan(chan, 1);
                if (chan->primary)
                        return;
        }
 out:
        if (tty)
                tty_flip_buffer_push(tty);
}