arch/um/os-Linux/sigio.c - Linux 2.6.26-rc5 (linux-2.6/openmoko-kernel/knife-kernel.git)

/*
 * Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pty.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include "kern_constants.h"
#include "kern_util.h"
#include "init.h"
#include "os.h"
#include "process.h"
#include "sigio.h"
#include "um_malloc.h"
#include "user.h"
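
/*
 * SIGIO workaround for hosts whose ptys do not deliver SIGIO for every
 * event UML needs.  A helper thread poll()s the affected descriptors and
 * reports activity by writing to a pipe whose read side is registered as
 * an interrupt source on the UML side (write_sigio_irq()).  The __init
 * code at the bottom of this file probes the host's pty behaviour during
 * early boot and decides whether the workaround is needed.
 */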

/*
 * Protected by sigio_lock(), also used by sigio_cleanup, which is an
 * exitcall.
 */
static int write_sigio_pid = -1;
static unsigned long write_sigio_stack;

/*
 * These arrays are initialized before the sigio thread is started, and
 * the descriptors closed after it is killed. So, it can't see them change.
 * On the UML side, they are changed under the sigio_lock.
 */
#define SIGIO_FDS_INIT {-1, -1}

static int write_sigio_fds[2] = SIGIO_FDS_INIT;
static int sigio_private[2] = SIGIO_FDS_INIT;

struct pollfds {
        struct pollfd *poll;
        int size;
        int used;
};

/*
 * Protected by sigio_lock(). Used by the sigio thread, but the UML thread
 * synchronizes with it.
 */
static struct pollfds current_poll;
static struct pollfds next_poll;
static struct pollfds all_sigio_fds;
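
/*
 * Body of the helper thread.  It poll()s current_poll forever: a byte on
 * sigio_private[1] is a request from update_thread() to swap current_poll
 * and next_poll, acknowledged by writing a byte back on the same
 * descriptor; activity on any other descriptor is reported by writing a
 * byte to write_sigio_fds[1] (which raises SIGIO on the UML side), and
 * that descriptor is then dropped from the poll set.
 */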

static int write_sigio_thread(void *unused)
{
        struct pollfds *fds, tmp;
        struct pollfd *p;
        int i, n, respond_fd;
        char c;

        signal(SIGWINCH, SIG_IGN);
        fds = &current_poll;
        while (1) {
                n = poll(fds->poll, fds->used, -1);
                if (n < 0) {
                        if (errno == EINTR)
                                continue;
                        printk(UM_KERN_ERR "write_sigio_thread : poll returned "
                               "%d, errno = %d\n", n, errno);
                }
                for (i = 0; i < fds->used; i++) {
                        p = &fds->poll[i];
                        if (p->revents == 0)
                                continue;
                        if (p->fd == sigio_private[1]) {
                                CATCH_EINTR(n = read(sigio_private[1], &c,
                                                     sizeof(c)));
                                if (n != sizeof(c))
                                        printk(UM_KERN_ERR
                                               "write_sigio_thread : "
                                               "read on socket failed, "
                                               "err = %d\n", errno);
                                tmp = current_poll;
                                current_poll = next_poll;
                                next_poll = tmp;
                                respond_fd = sigio_private[1];
                        }
                        else {
                                respond_fd = write_sigio_fds[1];
                                fds->used--;
                                memmove(&fds->poll[i], &fds->poll[i + 1],
                                        (fds->used - i) * sizeof(*fds->poll));
                        }

                        CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
                        if (n != sizeof(c))
                                printk(UM_KERN_ERR "write_sigio_thread : "
                                       "write on socket failed, err = %d\n",
                                       errno);
                }
        }

        return 0;
}
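
/*
 * Make sure *polls has room for at least n entries, reallocating the
 * array and copying the existing entries if necessary.  Returns 0 or
 * -ENOMEM.
 */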

static int need_poll(struct pollfds *polls, int n)
{
        struct pollfd *new;

        if (n <= polls->size)
                return 0;

        new = uml_kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
        if (new == NULL) {
                printk(UM_KERN_ERR "need_poll : failed to allocate new "
                       "pollfds\n");
                return -ENOMEM;
        }

        memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
        kfree(polls->poll);

        polls->poll = new;
        polls->size = n;
        return 0;
}
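
/*
 * Handshake with the helper thread: write a byte to sigio_private[0] to
 * make it pick up the poll set staged in next_poll, then wait for its
 * acknowledgement.  If the handshake fails, the thread is killed and all
 * of the workaround descriptors are closed.
 */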

/*
 * Must be called with sigio_lock held, because it's needed by the marked
 * critical section.
 */
static void update_thread(void)
{
        unsigned long flags;
        int n;
        char c;

        flags = set_signals(0);
        CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c)));
        if (n != sizeof(c)) {
                printk(UM_KERN_ERR "update_thread : write failed, err = %d\n",
                       errno);
                goto fail;
        }

        CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
        if (n != sizeof(c)) {
                printk(UM_KERN_ERR "update_thread : read failed, err = %d\n",
                       errno);
                goto fail;
        }

        set_signals(flags);
        return;
fail:
        /* Critical section start */
        if (write_sigio_pid != -1) {
                os_kill_process(write_sigio_pid, 1);
                free_stack(write_sigio_stack, 0);
        }
        write_sigio_pid = -1;
        close(sigio_private[0]);
        close(sigio_private[1]);
        close(write_sigio_fds[0]);
        close(write_sigio_fds[1]);
        /* Critical section end */
        set_signals(flags);
}
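
/*
 * Start polling a descriptor that was previously registered with
 * sigio_broken().  Unknown or already-watched descriptors are silently
 * ignored; otherwise next_poll is rebuilt with the new entry and the
 * helper thread is told to switch over.
 */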

int add_sigio_fd(int fd)
{
        struct pollfd *p;
        int err = 0, i, n;

        sigio_lock();
        for (i = 0; i < all_sigio_fds.used; i++) {
                if (all_sigio_fds.poll[i].fd == fd)
                        break;
        }
        if (i == all_sigio_fds.used)
                goto out;

        p = &all_sigio_fds.poll[i];

        for (i = 0; i < current_poll.used; i++) {
                if (current_poll.poll[i].fd == fd)
                        goto out;
        }

        n = current_poll.used;
        err = need_poll(&next_poll, n + 1);
        if (err)
                goto out;

        memcpy(next_poll.poll, current_poll.poll,
               current_poll.used * sizeof(struct pollfd));
        next_poll.poll[n] = *p;
        next_poll.used = n + 1;
        update_thread();
out:
        sigio_unlock();
        return err;
}
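
/*
 * Stop polling a descriptor: copy current_poll into next_poll without the
 * entry for fd and ask the helper thread to switch over.  Returns -EIO if
 * the helper thread is not running.
 */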

int ignore_sigio_fd(int fd)
{
        struct pollfd *p;
        int err = 0, i, n = 0;

        /*
         * This is called from exitcalls elsewhere in UML - if
         * sigio_cleanup has already run, then update_thread will hang
         * or fail because the thread is no longer running.
         */
        if (write_sigio_pid == -1)
                return -EIO;

        sigio_lock();
        for (i = 0; i < current_poll.used; i++) {
                if (current_poll.poll[i].fd == fd)
                        break;
        }
        if (i == current_poll.used)
                goto out;

        err = need_poll(&next_poll, current_poll.used - 1);
        if (err)
                goto out;

        for (i = 0; i < current_poll.used; i++) {
                p = &current_poll.poll[i];
                if (p->fd != fd)
                        next_poll.poll[n++] = *p;
        }
        next_poll.used = current_poll.used - 1;

        update_thread();
out:
        sigio_unlock();
        return err;
}
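
/*
 * Allocate a one-entry pollfd array watching fd for input.  Used to seed
 * current_poll with the private update descriptor before the helper
 * thread starts.
 */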

static struct pollfd *setup_initial_poll(int fd)
{
        struct pollfd *p;

        p = uml_kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
        if (p == NULL) {
                printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
                       "poll\n");
                return NULL;
        }
        *p = ((struct pollfd) { .fd = fd,
                                .events = POLLIN,
                                .revents = 0 });
        return p;
}
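
/*
 * Bring up the workaround: create the notification and update pipes,
 * register the read end of the notification pipe as an interrupt source
 * (write_sigio_irq()), and start the helper thread with CLONE_FILES |
 * CLONE_VM.  Calling it again once the thread is running is a no-op.
 */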

static void write_sigio_workaround(void)
{
        struct pollfd *p;
        int err;
        int l_write_sigio_fds[2];
        int l_sigio_private[2];
        int l_write_sigio_pid;

        /*
         * We call this *tons* of times - and most of the time it must just
         * bail out early.
         */
        sigio_lock();
        l_write_sigio_pid = write_sigio_pid;
        sigio_unlock();

        if (l_write_sigio_pid != -1)
                return;

        err = os_pipe(l_write_sigio_fds, 1, 1);
        if (err < 0) {
                printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, "
                       "err = %d\n", -err);
                return;
        }
        err = os_pipe(l_sigio_private, 1, 1);
        if (err < 0) {
                printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, "
                       "err = %d\n", -err);
                goto out_close1;
        }

        p = setup_initial_poll(l_sigio_private[1]);
        if (!p)
                goto out_close2;

        sigio_lock();

        /*
         * Did we race? Don't try to optimize this; it's not likely to
         * happen, and can happen no more than once, at boot.
         */
        if (write_sigio_pid != -1)
                goto out_free;

        current_poll = ((struct pollfds) { .poll = p,
                                           .used = 1,
                                           .size = 1 });

        if (write_sigio_irq(l_write_sigio_fds[0]))
                goto out_clear_poll;

        memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
        memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));

        write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
                                            CLONE_FILES | CLONE_VM,
                                            &write_sigio_stack);

        if (write_sigio_pid < 0)
                goto out_clear;

        sigio_unlock();
        return;

out_clear:
        write_sigio_pid = -1;
        write_sigio_fds[0] = -1;
        write_sigio_fds[1] = -1;
        sigio_private[0] = -1;
        sigio_private[1] = -1;
out_clear_poll:
        current_poll = ((struct pollfds) { .poll = NULL,
                                           .size = 0,
                                           .used = 0 });
out_free:
        sigio_unlock();
        kfree(p);
out_close2:
        close(l_sigio_private[0]);
        close(l_sigio_private[1]);
out_close1:
        close(l_write_sigio_fds[0]);
        close(l_write_sigio_fds[1]);
}
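
/*
 * Record a descriptor that needs the workaround: make sure the helper
 * thread is running and add an entry for fd (POLLIN for readers, POLLOUT
 * for writers) to all_sigio_fds, ready for add_sigio_fd().
 */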

void sigio_broken(int fd, int read)
{
        int err;

        write_sigio_workaround();

        sigio_lock();
        err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
        if (err) {
                printk(UM_KERN_ERR "maybe_sigio_broken - failed to add pollfd "
                       "for descriptor %d\n", fd);
                goto out;
        }

        all_sigio_fds.poll[all_sigio_fds.used++] =
                ((struct pollfd) { .fd = fd,
                                   .events = read ? POLLIN : POLLOUT,
                                   .revents = 0 });
out:
        sigio_unlock();
}

/* Changed during early boot */
static int pty_output_sigio;
static int pty_close_sigio;
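
/*
 * Enable the workaround for a tty descriptor if the boot-time checks
 * found that host ptys can't deliver the SIGIO events needed for this
 * kind of descriptor; anything that isn't a tty is left alone.
 */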

void maybe_sigio_broken(int fd, int read)
{
        if (!isatty(fd))
                return;

        if ((read || pty_output_sigio) && (!read || pty_close_sigio))
                return;

        sigio_broken(fd, read);
}

static void sigio_cleanup(void)
{
        if (write_sigio_pid == -1)
                return;

        os_kill_process(write_sigio_pid, 1);
        free_stack(write_sigio_stack, 0);
        write_sigio_pid = -1;
}

__uml_exitcall(sigio_cleanup);

/* Used as a flag during SIGIO testing early in boot */
static int got_sigio;

static void __init handler(int sig)
{
        got_sigio = 1;
}

struct openpty_arg {
        int master;
        int slave;
        int err;
};

static void openpty_cb(void *arg)
{
        struct openpty_arg *info = arg;

        info->err = 0;
        if (openpty(&info->master, &info->slave, NULL, NULL, NULL))
                info->err = -errno;
}
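
/*
 * Put the pty master into non-blocking, SIGIO-generating (O_ASYNC) mode
 * owned by this process, and make the slave non-blocking as well.
 */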

static int async_pty(int master, int slave)
{
        int flags;

        flags = fcntl(master, F_GETFL);
        if (flags < 0)
                return -errno;

        if ((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
            (fcntl(master, F_SETOWN, os_getpid()) < 0))
                return -errno;

        if ((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0))
                return -errno;

        return 0;
}
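
/*
 * Run one SIGIO probe: allocate a pty on the initial thread, put it into
 * raw asynchronous mode, temporarily install our own SIGIO handler, run
 * the supplied test, and restore the original handler afterwards.
 */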

static void __init check_one_sigio(void (*proc)(int, int))
{
        struct sigaction old, new;
        struct openpty_arg pty = { .master = -1, .slave = -1 };
        int master, slave, err;

        initial_thread_cb(openpty_cb, &pty);
        if (pty.err) {
                printk(UM_KERN_ERR "check_one_sigio failed, errno = %d\n",
                       -pty.err);
                return;
        }

        master = pty.master;
        slave = pty.slave;

        if ((master == -1) || (slave == -1)) {
                printk(UM_KERN_ERR "check_one_sigio failed to allocate a "
                       "pty\n");
                return;
        }

        /* Not now, but complain so we know where we failed. */
        err = raw(master);
        if (err < 0) {
                printk(UM_KERN_ERR "check_one_sigio : raw failed, errno = %d\n",
                       -err);
                return;
        }

        err = async_pty(master, slave);
        if (err < 0) {
                printk(UM_KERN_ERR "check_one_sigio : async_pty failed, "
                       "err = %d\n", -err);
                return;
        }

        if (sigaction(SIGIO, NULL, &old) < 0) {
                printk(UM_KERN_ERR "check_one_sigio : sigaction 1 failed, "
                       "errno = %d\n", errno);
                return;
        }

        new = old;
        new.sa_handler = handler;
        if (sigaction(SIGIO, &new, NULL) < 0) {
                printk(UM_KERN_ERR "check_one_sigio : sigaction 2 failed, "
                       "errno = %d\n", errno);
                return;
        }

        got_sigio = 0;
        (*proc)(master, slave);

        close(master);
        close(slave);

        if (sigaction(SIGIO, &old, NULL) < 0)
                printk(UM_KERN_ERR "check_one_sigio : sigaction 3 failed, "
                       "errno = %d\n", errno);
}
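
/*
 * Output probe: fill the master's output buffer until write() would
 * block, then drain the slave and check whether SIGIO arrives once the
 * master becomes writable again.
 */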

static void tty_output(int master, int slave)
{
        int n;
        char buf[512];

        printk(UM_KERN_INFO "Checking that host ptys support output SIGIO...");

        memset(buf, 0, sizeof(buf));

        while (write(master, buf, sizeof(buf)) > 0) ;
        if (errno != EAGAIN)
                printk(UM_KERN_ERR "tty_output : write failed, errno = %d\n",
                       errno);
        while (((n = read(slave, buf, sizeof(buf))) > 0) &&
               !({ barrier(); got_sigio; }))
                ;

        if (got_sigio) {
                printk(UM_KERN_CONT "Yes\n");
                pty_output_sigio = 1;
        } else if ((n < 0) && (errno == EAGAIN))
                printk(UM_KERN_CONT "No, enabling workaround\n");
        else
                printk(UM_KERN_CONT "tty_output : read failed, err = %d\n", n);
}
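
/*
 * Close probe: check whether closing the slave side raises SIGIO on the
 * master.
 */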

static void tty_close(int master, int slave)
{
        printk(UM_KERN_INFO "Checking that host ptys support SIGIO on "
               "close...");

        close(slave);
        if (got_sigio) {
                printk(UM_KERN_CONT "Yes\n");
                pty_close_sigio = 1;
        } else
                printk(UM_KERN_CONT "No, enabling workaround\n");
}

void __init check_sigio(void)
{
        if ((access("/dev/ptmx", R_OK) < 0) &&
            (access("/dev/ptyp0", R_OK) < 0)) {
                printk(UM_KERN_WARNING "No pseudo-terminals available - "
                       "skipping pty SIGIO check\n");
                return;
        }

        check_one_sigio(tty_output);
        check_one_sigio(tty_close);
}

/* Here because it only does the SIGIO testing for now */
void __init os_check_bugs(void)
{
        check_sigio();
}