// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include "chan.h"
#include <os.h>
#include <irq_kern.h>

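/*
 * When a channel backend is configured out of the build, its slot in
 * chan_table[] below falls back to these stubs, which just log an error
 * and fail the request, so asking for an unavailable channel type
 * produces a diagnostic rather than a silent failure.
 */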
#ifdef CONFIG_NOCONFIG_CHAN
static void *not_configged_init(char *str, int device,
                                const struct chan_opts *opts)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
        return NULL;
}

static int not_configged_open(int input, int output, int primary, void *data,
                              char **dev_out)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
        return -ENODEV;
}

static void not_configged_close(int fd, void *data)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
}

static int not_configged_read(int fd, char *c_out, void *data)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
        return -EIO;
}

static int not_configged_write(int fd, const char *buf, int len, void *data)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
        return -EIO;
}

static int not_configged_console_write(int fd, const char *buf, int len)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
        return -EIO;
}

static int not_configged_window_size(int fd, void *data, unsigned short *rows,
                                     unsigned short *cols)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
        return -ENODEV;
}

static void not_configged_free(void *data)
{
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
}

static const struct chan_ops not_configged_ops = {
        .init           = not_configged_init,
        .open           = not_configged_open,
        .close          = not_configged_close,
        .read           = not_configged_read,
        .write          = not_configged_write,
        .console_write  = not_configged_console_write,
        .window_size    = not_configged_window_size,
        .free           = not_configged_free,
        .winch          = 0,
};
#endif /* CONFIG_NOCONFIG_CHAN */

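/*
 * Open a single channel: invoke the backend's ->open() hook (if any),
 * switch the resulting descriptor to non-blocking mode with
 * os_set_fd_block(), and stash the fd in the chan.
 */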
static int open_one_chan(struct chan *chan)
{
        int fd, err;

        if (chan->opened)
                return 0;

        if (chan->ops->open == NULL)
                fd = 0;
        else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary,
                                     chan->data, &chan->dev);
        if (fd < 0)
                return fd;

        err = os_set_fd_block(fd, 0);
        if (err) {
                (*chan->ops->close)(fd, chan->data);
                return err;
        }

        chan->fd = fd;

        chan->opened = 1;
        return 0;
}

static int open_chan(struct list_head *chans)
{
        struct list_head *ele;
        struct chan *chan;
        int ret = -1;

        list_for_each(ele, chans) {
                chan = list_entry(ele, struct chan, list);
                ret = open_one_chan(chan);
                if (chan->primary)
                        return ret;
        }
        return 0;
}

void chan_enable_winch(struct chan *chan, struct tty_port *port)
{
        if (chan && chan->primary && chan->ops->winch)
                register_winch(chan->fd, port);
}

static void line_timer_cb(struct work_struct *work)
{
        struct line *line = container_of(work, struct line, task.work);

        if (!line->throttled)
                chan_interrupt(line, line->driver->read_irq);
}

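/*
 * Open every channel attached to the line and hook each one up to the
 * line's interrupts via line_setup_irq(). A failure on the primary
 * channel unwinds by closing whatever was opened so far.
 */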
int enable_chan(struct line *line)
{
        struct list_head *ele;
        struct chan *chan;
        int err;

        INIT_DELAYED_WORK(&line->task, line_timer_cb);

        list_for_each(ele, &line->chan_list) {
                chan = list_entry(ele, struct chan, list);
                err = open_one_chan(chan);
                if (err) {
                        if (chan->primary)
                                goto out_close;

                        continue;
                }

                if (chan->enabled)
                        continue;
                err = line_setup_irq(chan->fd, chan->input, chan->output, line,
                                     chan);
                if (err)
                        goto out_close;

                chan->enabled = 1;
        }

        return 0;

 out_close:
        close_chan(line);
        return err;
}

/* Items are added in IRQ context, when free_irq can't be called, and
 * removed in process context, when it can.
 * This handles interrupt sources which disappear, and which need to
 * be permanently disabled.  This is discovered in IRQ context, but
 * the freeing of the IRQ must be done later.
 */
static DEFINE_SPINLOCK(irqs_to_free_lock);
static LIST_HEAD(irqs_to_free);

void free_irqs(void)
{
        struct chan *chan;
        LIST_HEAD(list);
        struct list_head *ele;
        unsigned long flags;

        spin_lock_irqsave(&irqs_to_free_lock, flags);
        list_splice_init(&irqs_to_free, &list);
        spin_unlock_irqrestore(&irqs_to_free_lock, flags);

        list_for_each(ele, &list) {
                chan = list_entry(ele, struct chan, free_list);

                if (chan->input && chan->enabled)
                        um_free_irq(chan->line->driver->read_irq, chan);
                if (chan->output && chan->enabled)
                        um_free_irq(chan->line->driver->write_irq, chan);
                chan->enabled = 0;
        }
}

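/*
 * Tear down a single channel. With delay_free_irq set (i.e. when called
 * from IRQ context) the chan is queued on irqs_to_free so that
 * free_irqs() can release its IRQs later from process context;
 * otherwise they are freed immediately, before the backend's ->close()
 * hook is called.
 */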
static void close_one_chan(struct chan *chan, int delay_free_irq)
{
        unsigned long flags;

        if (!chan->opened)
                return;

        if (delay_free_irq) {
                spin_lock_irqsave(&irqs_to_free_lock, flags);
                list_add(&chan->free_list, &irqs_to_free);
                spin_unlock_irqrestore(&irqs_to_free_lock, flags);
        } else {
                if (chan->input && chan->enabled)
                        um_free_irq(chan->line->driver->read_irq, chan);
                if (chan->output && chan->enabled)
                        um_free_irq(chan->line->driver->write_irq, chan);
                chan->enabled = 0;
        }
        if (chan->ops->close != NULL)
                (*chan->ops->close)(chan->fd, chan->data);

        chan->opened = 0;
        chan->fd = -1;
}

void close_chan(struct line *line)
{
        struct chan *chan;

        /* Close in reverse order as open in case more than one of them
         * refers to the same device and they save and restore that device's
         * state.  Then, the first one opened will have the original state,
         * so it must be the last closed.
         */
        list_for_each_entry_reverse(chan, &line->chan_list, list) {
                close_one_chan(chan, 0);
        }
}

void deactivate_chan(struct chan *chan, int irq)
{
        if (chan && chan->enabled)
                deactivate_fd(chan->fd, irq);
}

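/*
 * Push len bytes from buf through the channel's ->write() hook. Only
 * the primary channel's return value is propagated back to the caller.
 */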
int write_chan(struct chan *chan, const char *buf, int len,
               int write_irq)
{
        int n, ret = 0;

        if (len == 0 || !chan || !chan->ops->write)
                return 0;

        n = chan->ops->write(chan->fd, buf, len, chan->data);
        if (chan->primary)
                ret = n;

        return ret;
}

int console_write_chan(struct chan *chan, const char *buf, int len)
{
        int n, ret = 0;

        if (!chan || !chan->ops->console_write)
                return 0;

        n = chan->ops->console_write(chan->fd, buf, len);
        if (chan->primary)
                ret = n;
        return ret;
}

int console_open_chan(struct line *line, struct console *co)
{
        int err;

        err = open_chan(&line->chan_list);
        if (err)
                return err;

        printk(KERN_INFO "Console initialized on /dev/%s%d\n", co->name,
               co->index);
        return 0;
}

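/*
 * Ask the line's primary channel for its window size, checking the
 * input side first and then the output side. Returns 0 if neither
 * provides a ->window_size() hook.
 */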
int chan_window_size(struct line *line, unsigned short *rows_out,
                     unsigned short *cols_out)
{
        struct chan *chan;

        chan = line->chan_in;
        if (chan && chan->primary) {
                if (chan->ops->window_size == NULL)
                        return 0;
                return chan->ops->window_size(chan->fd, chan->data,
                                              rows_out, cols_out);
        }
        chan = line->chan_out;
        if (chan && chan->primary) {
                if (chan->ops->window_size == NULL)
                        return 0;
                return chan->ops->window_size(chan->fd, chan->data,
                                              rows_out, cols_out);
        }
        return 0;
}

static void free_one_chan(struct chan *chan)
{
        list_del(&chan->list);

        close_one_chan(chan, 0);

        if (chan->ops->free != NULL)
                (*chan->ops->free)(chan->data);

        if (chan->primary && chan->output)
                ignore_sigio_fd(chan->fd);
        kfree(chan);
}

static void free_chan(struct list_head *chans)
{
        struct list_head *ele, *next;
        struct chan *chan;

        list_for_each_safe(ele, next, chans) {
                chan = list_entry(ele, struct chan, list);
                free_one_chan(chan);
        }
}

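/*
 * The *_config_string() helpers below print a line's configuration back
 * into a caller-supplied buffer as "type:device" per channel, with the
 * input and output channels separated by a comma, using CONFIG_CHUNK()
 * to append each piece.
 */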
static int one_chan_config_string(struct chan *chan, char *str, int size,
                                  char **error_out)
{
        int n = 0;

        if (chan == NULL) {
                CONFIG_CHUNK(str, size, n, "none", 1);
                return n;
        }

        CONFIG_CHUNK(str, size, n, chan->ops->type, 0);

        if (chan->dev == NULL) {
                CONFIG_CHUNK(str, size, n, "", 1);
                return n;
        }

        CONFIG_CHUNK(str, size, n, ":", 0);
        CONFIG_CHUNK(str, size, n, chan->dev, 0);

        return n;
}

static int chan_pair_config_string(struct chan *in, struct chan *out,
                                   char *str, int size, char **error_out)
{
        int n;

        n = one_chan_config_string(in, str, size, error_out);
        str += n;
        size -= n;

        if (in == out) {
                CONFIG_CHUNK(str, size, n, "", 1);
                return n;
        }

        CONFIG_CHUNK(str, size, n, ",", 1);
        n = one_chan_config_string(out, str, size, error_out);
        str += n;
        size -= n;
        CONFIG_CHUNK(str, size, n, "", 1);

        return n;
}

int chan_config_string(struct line *line, char *str, int size,
                       char **error_out)
{
        struct chan *in = line->chan_in, *out = line->chan_out;

        if (in && !in->primary)
                in = NULL;
        if (out && !out->primary)
                out = NULL;

        return chan_pair_config_string(in, out, str, size, error_out);
}

struct chan_type {
        char *key;
        const struct chan_ops *ops;
};

static const struct chan_type chan_table[] = {
        { "fd", &fd_ops },

#ifdef CONFIG_NULL_CHAN
        { "null", &null_ops },
#else
        { "null", &not_configged_ops },
#endif

#ifdef CONFIG_PORT_CHAN
        { "port", &port_ops },
#else
        { "port", &not_configged_ops },
#endif

#ifdef CONFIG_PTY_CHAN
        { "pty", &pty_ops },
        { "pts", &pts_ops },
#else
        { "pty", &not_configged_ops },
        { "pts", &not_configged_ops },
#endif

#ifdef CONFIG_TTY_CHAN
        { "tty", &tty_ops },
#else
        { "tty", &not_configged_ops },
#endif

#ifdef CONFIG_XTERM_CHAN
        { "xterm", &xterm_ops },
#else
        { "xterm", &not_configged_ops },
#endif
};

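/*
 * Turn one textual channel description into a struct chan: look the
 * leading keyword up in chan_table[], let the backend's ->init() hook
 * parse the remainder of the string, and allocate the chan itself.
 */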
static struct chan *parse_chan(struct line *line, char *str, int device,
                               const struct chan_opts *opts, char **error_out)
{
        const struct chan_type *entry;
        const struct chan_ops *ops;
        struct chan *chan;
        void *data;
        int i;

        ops = NULL;
        data = NULL;
        for(i = 0; i < ARRAY_SIZE(chan_table); i++) {
                entry = &chan_table[i];
                if (!strncmp(str, entry->key, strlen(entry->key))) {
                        ops = entry->ops;
                        str += strlen(entry->key);
                        break;
                }
        }
        if (ops == NULL) {
                *error_out = "No match for configured backends";
                return NULL;
        }

        data = (*ops->init)(str, device, opts);
        if (data == NULL) {
                *error_out = "Configuration failed";
                return NULL;
        }

        chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
        if (chan == NULL) {
                *error_out = "Memory allocation failed";
                return NULL;
        }
        *chan = ((struct chan) { .list          = LIST_HEAD_INIT(chan->list),
                                 .free_list     =
                                        LIST_HEAD_INIT(chan->free_list),
                                 .line          = line,
                                 .primary       = 1,
                                 .input         = 0,
                                 .output        = 0,
                                 .opened        = 0,
                                 .enabled       = 0,
                                 .fd            = -1,
                                 .ops           = ops,
                                 .data          = data });
        return chan;
}

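/*
 * Parse a whole channel description for a line. A single description
 * (e.g. "pty" or "xterm") is used for both input and output, while a
 * comma-separated pair such as "fd:0,fd:1" configures the two
 * directions separately (examples here are typical UML command line
 * values, not an exhaustive list).
 */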
int parse_chan_pair(char *str, struct line *line, int device,
                    const struct chan_opts *opts, char **error_out)
{
        struct list_head *chans = &line->chan_list;
        struct chan *new;
        char *in, *out;

        if (!list_empty(chans)) {
                line->chan_in = line->chan_out = NULL;
                free_chan(chans);
                INIT_LIST_HEAD(chans);
        }

        if (!str)
                return 0;

        out = strchr(str, ',');
        if (out != NULL) {
                in = str;
                *out = '\0';
                out++;
                new = parse_chan(line, in, device, opts, error_out);
                if (new == NULL)
                        return -1;

                new->input = 1;
                list_add(&new->list, chans);
                line->chan_in = new;

                new = parse_chan(line, out, device, opts, error_out);
                if (new == NULL)
                        return -1;

                list_add(&new->list, chans);
                new->output = 1;
                line->chan_out = new;
        }
        else {
                new = parse_chan(line, str, device, opts, error_out);
                if (new == NULL)
                        return -1;

                list_add(&new->list, chans);
                new->input = 1;
                new->output = 1;
                line->chan_in = line->chan_out = new;
        }
        return 0;
}

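/*
 * Read interrupt handler for a line: drain the input channel into the
 * tty flip buffer, rescheduling via the line's delayed work when the
 * buffer is full, and hang up the tty if the backend reports -EIO.
 */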
void chan_interrupt(struct line *line, int irq)
{
        struct tty_port *port = &line->port;
        struct chan *chan = line->chan_in;
        int err;
        char c;

        if (!chan || !chan->ops->read)
                goto out;

        do {
                if (!tty_buffer_request_room(port, 1)) {
                        schedule_delayed_work(&line->task, 1);
                        goto out;
                }
                err = chan->ops->read(chan->fd, &c, chan->data);
                if (err > 0)
                        tty_insert_flip_char(port, c, TTY_NORMAL);
        } while (err > 0);

        if (err == -EIO) {
                if (chan->primary) {
                        tty_port_tty_hangup(&line->port, false);
                        if (line->chan_out != chan)
                                close_one_chan(line->chan_out, 1);
                }
                close_one_chan(chan, 1);
                if (chan->primary)
                        return;
        }
 out:
        tty_flip_buffer_push(port);
}