/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 * Licensed under the GPL
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include "chan.h"

#ifdef CONFIG_NOCONFIG_CHAN
static void *not_configged_init(char *str, int device,
				const struct chan_opts *opts)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
	return NULL;
}

static int not_configged_open(int input, int output, int primary, void *data,
			      char **dev_out)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
	return -ENODEV;
}

static void not_configged_close(int fd, void *data)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
}

static int not_configged_read(int fd, char *c_out, void *data)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
	return -EIO;
}

static int not_configged_write(int fd, const char *buf, int len, void *data)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
	return -EIO;
}

static int not_configged_console_write(int fd, const char *buf, int len)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
	return -EIO;
}

static int not_configged_window_size(int fd, void *data, unsigned short *rows,
				     unsigned short *cols)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
	return -ENODEV;
}

static void not_configged_free(void *data)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
}

static const struct chan_ops not_configged_ops = {
	.init		= not_configged_init,
	.open		= not_configged_open,
	.close		= not_configged_close,
	.read		= not_configged_read,
	.write		= not_configged_write,
	.console_write	= not_configged_console_write,
	.window_size	= not_configged_window_size,
	.free		= not_configged_free,
};
#endif /* CONFIG_NOCONFIG_CHAN */

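/*
 * open_one_chan - open a single channel through its backend's ->open hook
 * and switch the resulting host fd to non-blocking mode.  An already open
 * channel is left untouched.  Returns 0 on success or a negative error code.
 */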
static int open_one_chan(struct chan *chan)
{
	int fd, err;

	if (chan->opened)
		return 0;

	if (chan->ops->open == NULL)
		fd = 0;
	else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary,
				     chan->data, &chan->dev);
	if (fd < 0)
		return fd;

	err = os_set_fd_block(fd, 0);
	if (err) {
		(*chan->ops->close)(fd, chan->data);
		return err;
	}

	chan->fd = fd;

	chan->opened = 1;
	return 0;
}

static int open_chan(struct list_head *chans)
{
	struct list_head *ele;
	struct chan *chan;
	int ret = -ENODEV;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
		ret = open_one_chan(chan);
		if (chan->primary)
			return ret;
	}
	return ret;
}

void chan_enable_winch(struct chan *chan, struct tty_port *port)
{
	if (chan && chan->primary && chan->ops->winch)
		register_winch(chan->fd, port);
}

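/*
 * Deferred-read worker: chan_interrupt() schedules this delayed work when
 * the tty flip buffer has no room, so the read is retried later.  Nothing
 * is read while the line is throttled.
 */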
static void line_timer_cb(struct work_struct *work)
{
	struct line *line = container_of(work, struct line, task.work);

	if (!line->throttled)
		chan_interrupt(line, line->driver->read_irq);
}

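/*
 * enable_chan - open every channel attached to a line and register its
 * host fd with the IRQ layer.  A failure on the primary channel aborts
 * and closes the whole line; failures on secondary channels are skipped.
 */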
int enable_chan(struct line *line)
{
	struct list_head *ele;
	struct chan *chan;
	int err;

	INIT_DELAYED_WORK(&line->task, line_timer_cb);

	list_for_each(ele, &line->chan_list) {
		chan = list_entry(ele, struct chan, list);
		err = open_one_chan(chan);
		if (err) {
			if (chan->primary)
				goto out_close;

			continue;
		}

		if (chan->enabled)
			continue;
		err = line_setup_irq(chan->fd, chan->input, chan->output, line,
				     chan);
		if (err)
			goto out_close;

		chan->enabled = 1;
	}

	return 0;

 out_close:
	close_chan(line);
	return err;
}

/* Items are added in IRQ context, when free_irq can't be called, and
 * removed in process context, when it can.
 * This handles interrupt sources which disappear, and which need to
 * be permanently disabled.  This is discovered in IRQ context, but
 * the freeing of the IRQ must be done later.
 */
static DEFINE_SPINLOCK(irqs_to_free_lock);
static LIST_HEAD(irqs_to_free);

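/*
 * free_irqs - runs in process context and releases the IRQs of channels
 * that close_one_chan() queued on irqs_to_free while in IRQ context.
 */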
void free_irqs(void)
{
	struct chan *chan;
	LIST_HEAD(list);
	struct list_head *ele;
	unsigned long flags;

	spin_lock_irqsave(&irqs_to_free_lock, flags);
	list_splice_init(&irqs_to_free, &list);
	spin_unlock_irqrestore(&irqs_to_free_lock, flags);

	list_for_each(ele, &list) {
		chan = list_entry(ele, struct chan, free_list);

		if (chan->input && chan->enabled)
			um_free_irq(chan->line->driver->read_irq, chan);
		if (chan->output && chan->enabled)
			um_free_irq(chan->line->driver->write_irq, chan);
		chan->enabled = 0;
	}
}

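/*
 * close_one_chan - tear down a single channel.  With delay_free_irq set
 * (callers in IRQ context) the channel is only queued on irqs_to_free;
 * otherwise its read/write IRQs are released immediately before the
 * backend's ->close hook runs.
 */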
static void close_one_chan(struct chan *chan, int delay_free_irq)
{
	unsigned long flags;

	if (!chan->opened)
		return;

	if (delay_free_irq) {
		spin_lock_irqsave(&irqs_to_free_lock, flags);
		list_add(&chan->free_list, &irqs_to_free);
		spin_unlock_irqrestore(&irqs_to_free_lock, flags);
	}
	else {
		if (chan->input && chan->enabled)
			um_free_irq(chan->line->driver->read_irq, chan);
		if (chan->output && chan->enabled)
			um_free_irq(chan->line->driver->write_irq, chan);
		chan->enabled = 0;
	}
	if (chan->ops->close != NULL)
		(*chan->ops->close)(chan->fd, chan->data);

	chan->opened = 0;
	chan->fd = -1;
}

void close_chan(struct line *line)
{
	struct chan *chan;

	/* Close in reverse order as open in case more than one of them
	 * refers to the same device and they save and restore that device's
	 * state.  Then, the first one opened will have the original state,
	 * so it must be the last closed.
	 */
	list_for_each_entry_reverse(chan, &line->chan_list, list) {
		close_one_chan(chan, 0);
	}
}

void deactivate_chan(struct chan *chan, int irq)
{
	if (chan && chan->enabled)
		deactivate_fd(chan->fd, irq);
}

void reactivate_chan(struct chan *chan, int irq)
{
	if (chan && chan->enabled)
		reactivate_fd(chan->fd, irq);
}

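/*
 * write_chan - push tty output through the backend's ->write hook.  Only
 * the primary channel's result is reported; a short write or -EAGAIN
 * re-arms the write IRQ so the remainder goes out once the fd is
 * writable again.
 */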
int write_chan(struct chan *chan, const char *buf, int len,
	       int write_irq)
{
	int n, ret = 0;

	if (len == 0 || !chan || !chan->ops->write)
		return 0;

	n = chan->ops->write(chan->fd, buf, len, chan->data);
	if (chan->primary) {
		ret = n;
		if ((ret == -EAGAIN) || ((ret >= 0) && (ret < len)))
			reactivate_fd(chan->fd, write_irq);
	}
	return ret;
}

int console_write_chan(struct chan *chan, const char *buf, int len)
{
	int n, ret = 0;

	if (!chan || !chan->ops->console_write)
		return 0;

	n = chan->ops->console_write(chan->fd, buf, len);
	if (chan->primary)
		ret = n;
	return ret;
}

int console_open_chan(struct line *line, struct console *co)
{
	int err;

	err = open_chan(&line->chan_list);
	if (err)
		return err;

	printk(KERN_INFO "Console initialized on /dev/%s%d\n", co->name,
	       co->index);
	return 0;
}

int chan_window_size(struct line *line, unsigned short *rows_out,
		     unsigned short *cols_out)
{
	struct chan *chan;

	chan = line->chan_in;
	if (chan && chan->primary) {
		if (chan->ops->window_size == NULL)
			return 0;
		return chan->ops->window_size(chan->fd, chan->data,
					      rows_out, cols_out);
	}
	chan = line->chan_out;
	if (chan && chan->primary) {
		if (chan->ops->window_size == NULL)
			return 0;
		return chan->ops->window_size(chan->fd, chan->data,
					      rows_out, cols_out);
	}
	return 0;
}

static void free_one_chan(struct chan *chan)
{
	list_del(&chan->list);

	close_one_chan(chan, 0);

	if (chan->ops->free != NULL)
		(*chan->ops->free)(chan->data);

	if (chan->primary && chan->output)
		ignore_sigio_fd(chan->fd);
	kfree(chan);
}

static void free_chan(struct list_head *chans)
{
	struct list_head *ele, *next;
	struct chan *chan;

	list_for_each_safe(ele, next, chans) {
		chan = list_entry(ele, struct chan, list);
		free_one_chan(chan);
	}
}

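/*
 * Emit one channel's configuration into str via CONFIG_CHUNK, either as
 * "type" or "type:dev"; a NULL channel is reported as "none".  Returns
 * the number of bytes written.
 */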
static int one_chan_config_string(struct chan *chan, char *str, int size,
				  char **error_out)
{
	int n = 0;

	if (chan == NULL) {
		CONFIG_CHUNK(str, size, n, "none", 1);
		return n;
	}

	CONFIG_CHUNK(str, size, n, chan->ops->type, 0);

	if (chan->dev == NULL) {
		CONFIG_CHUNK(str, size, n, "", 1);
		return n;
	}

	CONFIG_CHUNK(str, size, n, ":", 0);
	CONFIG_CHUNK(str, size, n, chan->dev, 0);

	return n;
}

static int chan_pair_config_string(struct chan *in, struct chan *out,
				   char *str, int size, char **error_out)
{
	int n;

	n = one_chan_config_string(in, str, size, error_out);
	str += n;
	size -= n;

	if (in == out) {
		CONFIG_CHUNK(str, size, n, "", 1);
		return n;
	}

	CONFIG_CHUNK(str, size, n, ",", 1);
	n = one_chan_config_string(out, str, size, error_out);
	str += n;
	size -= n;
	CONFIG_CHUNK(str, size, n, "", 1);

	return n;
}

int chan_config_string(struct line *line, char *str, int size,
		       char **error_out)
{
	struct chan *in = line->chan_in, *out = line->chan_out;

	if (in && !in->primary)
		in = NULL;
	if (out && !out->primary)
		out = NULL;

	return chan_pair_config_string(in, out, str, size, error_out);
}

struct chan_type {
	char *key;
	const struct chan_ops *ops;
};

static const struct chan_type chan_table[] = {
	{ "fd", &fd_ops },

#ifdef CONFIG_NULL_CHAN
	{ "null", &null_ops },
#else
	{ "null", &not_configged_ops },
#endif

#ifdef CONFIG_PORT_CHAN
	{ "port", &port_ops },
#else
	{ "port", &not_configged_ops },
#endif

#ifdef CONFIG_PTY_CHAN
	{ "pty", &pty_ops },
	{ "pts", &pts_ops },
#else
	{ "pty", &not_configged_ops },
	{ "pts", &not_configged_ops },
#endif

#ifdef CONFIG_TTY_CHAN
	{ "tty", &tty_ops },
#else
	{ "tty", &not_configged_ops },
#endif

#ifdef CONFIG_XTERM_CHAN
	{ "xterm", &xterm_ops },
#else
	{ "xterm", &not_configged_ops },
#endif
};

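/*
 * parse_chan - find the backend whose key prefixes the channel description,
 * hand the remainder of the string to its ->init hook, and wrap the result
 * in a newly allocated struct chan.  Returns NULL and sets *error_out on
 * failure.
 */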
static struct chan *parse_chan(struct line *line, char *str, int device,
			       const struct chan_opts *opts, char **error_out)
{
	const struct chan_type *entry;
	const struct chan_ops *ops;
	struct chan *chan;
	void *data;
	int i;

	ops = NULL;
	data = NULL;
	for(i = 0; i < ARRAY_SIZE(chan_table); i++) {
		entry = &chan_table[i];
		if (!strncmp(str, entry->key, strlen(entry->key))) {
			ops = entry->ops;
			str += strlen(entry->key);
			break;
		}
	}
	if (ops == NULL) {
		*error_out = "No match for configured backends";
		return NULL;
	}

	data = (*ops->init)(str, device, opts);
	if (data == NULL) {
		*error_out = "Configuration failed";
		return NULL;
	}

	chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
	if (chan == NULL) {
		*error_out = "Memory allocation failed";
		return NULL;
	}
	*chan = ((struct chan) { .list		= LIST_HEAD_INIT(chan->list),
				 .free_list	=
					LIST_HEAD_INIT(chan->free_list),
				 .line		= line,
				 .primary	= 1,
				 .input		= 0,
				 .output	= 0,
				 .opened	= 0,
				 .enabled	= 0,
				 .fd		= -1,
				 .ops		= ops,
				 .data		= data });
	return chan;
}

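/*
 * parse_chan_pair - parse a channel description such as "pty" or, with a
 * comma, an input/output pair such as "null,xterm".  Any channels already
 * attached to the line are freed first; the new channel(s) become
 * line->chan_in and line->chan_out.
 */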
int parse_chan_pair(char *str, struct line *line, int device,
		    const struct chan_opts *opts, char **error_out)
{
	struct list_head *chans = &line->chan_list;
	struct chan *new;
	char *in, *out;

	if (!list_empty(chans)) {
		line->chan_in = line->chan_out = NULL;
		free_chan(chans);
		INIT_LIST_HEAD(chans);
	}

	if (!str)
		return 0;

	out = strchr(str, ',');
	if (out != NULL) {
		in = str;
		*out = '\0';
		out++;
		new = parse_chan(line, in, device, opts, error_out);
		if (new == NULL)
			return -1;

		new->input = 1;
		list_add(&new->list, chans);
		line->chan_in = new;

		new = parse_chan(line, out, device, opts, error_out);
		if (new == NULL)
			return -1;

		list_add(&new->list, chans);
		new->output = 1;
		line->chan_out = new;
	}
	else {
		new = parse_chan(line, str, device, opts, error_out);
		if (new == NULL)
			return -1;

		list_add(&new->list, chans);
		new->input = 1;
		new->output = 1;
		line->chan_in = line->chan_out = new;
	}
	return 0;
}

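/*
 * chan_interrupt - read-IRQ path for a line: pull characters from the input
 * channel into the tty flip buffer until the read would block.  If the flip
 * buffer has no room, the read is retried from delayed work; -EIO from the
 * backend hangs up the tty and closes the channel (and its output partner).
 */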
void chan_interrupt(struct line *line, int irq)
{
	struct tty_port *port = &line->port;
	struct chan *chan = line->chan_in;
	int err;
	char c;

	if (!chan || !chan->ops->read)
		goto out;

	do {
		if (!tty_buffer_request_room(port, 1)) {
			schedule_delayed_work(&line->task, 1);
			goto out;
		}
		err = chan->ops->read(chan->fd, &c, chan->data);
		if (err > 0)
			tty_insert_flip_char(port, c, TTY_NORMAL);
	} while (err > 0);

	if (err == 0)
		reactivate_fd(chan->fd, irq);
	if (err == -EIO) {
		if (chan->primary) {
			tty_port_tty_hangup(&line->port, false);
			if (line->chan_out != chan)
				close_one_chan(line->chan_out, 1);
		}
		close_one_chan(chan, 1);
		if (chan->primary)
			return;
	}
 out:
	tty_flip_buffer_push(port);
}