ksrc/nucleus/pipe.c (xenomai-head.git)
/*
 * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
 * Copyright (C) 2005 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Xenomai is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA
 * 02139, USA; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Xenomai is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/termios.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <nucleus/pod.h>
#include <nucleus/heap.h>
#include <nucleus/pipe.h>

static int xnpipe_asyncsig = SIGIO;

struct xnpipe_state xnpipe_states[XNPIPE_NDEVS];

#define XNPIPE_BITMAP_SIZE	((XNPIPE_NDEVS + BITS_PER_LONG - 1) / BITS_PER_LONG)
static unsigned long xnpipe_bitmap[XNPIPE_BITMAP_SIZE];

struct xnqueue xnpipe_sleepq, xnpipe_asyncq;

int xnpipe_wakeup_apc;

static DECLARE_DEVCLASS(xnpipe_class);
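
/*
 * Overview of the globals above: xnpipe_states[] holds the per-minor
 * device state, xnpipe_bitmap tracks which minors are allocated,
 * xnpipe_sleepq links states with Linux-side waiters, xnpipe_asyncq
 * links states with SIGIO (fasync) subscribers, and xnpipe_wakeup_apc
 * is the APC used to defer Linux-side wakeups (see
 * xnpipe_schedule_request() below).
 */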
/* Allocation of minor values */

static inline int xnpipe_minor_alloc(int minor)
{
	spl_t s;

	if ((minor < 0 && minor != XNPIPE_MINOR_AUTO) || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	xnlock_get_irqsave(&nklock, s);

	if (minor == XNPIPE_MINOR_AUTO)
		minor = find_first_zero_bit(xnpipe_bitmap, XNPIPE_NDEVS);

	if (minor == XNPIPE_NDEVS ||
	    testbits(xnpipe_bitmap[minor / BITS_PER_LONG],
		     1UL << (minor % BITS_PER_LONG)))
		minor = -EBUSY;
	else
		__setbits(xnpipe_bitmap[minor / BITS_PER_LONG],
			  1UL << (minor % BITS_PER_LONG));

	xnlock_put_irqrestore(&nklock, s);

	return minor;
}

static inline void xnpipe_minor_free(int minor)
{
	__clrbits(xnpipe_bitmap[minor / BITS_PER_LONG],
		  1UL << (minor % BITS_PER_LONG));
}

static inline void xnpipe_enqueue_wait(struct xnpipe_state *state, int mask)
{
	if (state->wcount != 0x7fffffff && state->wcount++ == 0)
		appendq(&xnpipe_sleepq, &state->slink);

	__setbits(state->status, mask);
}

static inline void xnpipe_dequeue_wait(struct xnpipe_state *state, int mask)
{
	if (testbits(state->status, mask))
		if (--state->wcount == 0) {
			removeq(&xnpipe_sleepq, &state->slink);
			__clrbits(state->status, mask);
		}
}

static inline void xnpipe_dequeue_all(struct xnpipe_state *state, int mask)
{
	if (testbits(state->status, mask)) {
		if (state->wcount) {
			state->wcount = 0;
			removeq(&xnpipe_sleepq, &state->slink);
			__clrbits(state->status, mask);
		}
	}
}
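
/*
 * The helpers above count Linux tasks sleeping on a pipe
 * (state->wcount) and link the state into the global xnpipe_sleepq
 * when the first waiter shows up, so that xnpipe_wakeup_proc() can
 * find the states needing a wakeup. The mask records what is waited
 * for (XNPIPE_USER_WREAD or XNPIPE_USER_WSYNC).
 */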
/* Must be entered with nklock held, interrupts off. */
#define xnpipe_wait(__state, __mask, __s, __cond)			\
({									\
	wait_queue_head_t *__waitq;					\
	DEFINE_WAIT(__wait);						\
	int __sigpending;						\
	if ((__mask) & XNPIPE_USER_WREAD)				\
		__waitq = &(__state)->readq;				\
	else								\
		__waitq = &(__state)->syncq;				\
	xnpipe_enqueue_wait(__state, __mask);				\
	xnlock_put_irqrestore(&nklock, __s);				\
	for (;;) {							\
		__sigpending = signal_pending(current);			\
		if (__sigpending)					\
			break;						\
		prepare_to_wait_exclusive(__waitq, &__wait, TASK_INTERRUPTIBLE); \
		if (__cond)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(__waitq, &__wait);					\
	/* Restore the interrupt state initially set by the caller. */	\
	xnlock_get_irqsave(&nklock, __s);				\
	xnpipe_dequeue_wait(__state, __mask);				\
	__sigpending;							\
})
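
/*
 * Typical invocation (see xnpipe_open() and xnpipe_read() below):
 *
 *	sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
 *				 !emptyq_p(&state->outq));
 *
 * The nklock is dropped across the Linux-side sleep and re-acquired
 * before the macro returns; the result tells whether a signal is
 * pending for the caller.
 */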
static void xnpipe_wakeup_proc(void *cookie)
{
	struct xnpipe_state *state;
	struct xnholder *h, *nh;
	u_long rbits;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	nh = getheadq(&xnpipe_sleepq);
	while ((h = nh) != NULL) {
		nh = nextq(&xnpipe_sleepq, h);
		state = link2xnpipe(h, slink);
		rbits = testbits(state->status, XNPIPE_USER_ALL_READY);
		if (rbits) {
			__clrbits(state->status, rbits);
			/*
			 * We could be switched out as a result of
			 * waking up a waiter, so we do the
			 * housekeeping first and release the nklock
			 * before calling wake_up_interruptible().
			 */
			if ((rbits & XNPIPE_USER_WREAD_READY) != 0) {
				if (waitqueue_active(&state->readq)) {
					xnlock_put_irqrestore(&nklock, s);
					wake_up_interruptible(&state->readq);
					xnlock_get_irqsave(&nklock, s);
				}
			}
			if ((rbits & XNPIPE_USER_WSYNC_READY) != 0) {
				if (waitqueue_active(&state->syncq)) {
					xnlock_put_irqrestore(&nklock, s);
					wake_up_interruptible(&state->syncq);
					xnlock_get_irqsave(&nklock, s);
				}
			}
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
			/*
			 * Assume a waiter might have entered/left the
			 * queue, so we need to refetch the sleep
			 * queue head to be safe.
			 */
			nh = getheadq(&xnpipe_sleepq);
#endif
		}
	}

	/*
	 * Scan the async queue, sending the proper signal to
	 * subscribers.
	 */
	nh = getheadq(&xnpipe_asyncq);
	while ((h = nh) != NULL) {
		nh = nextq(&xnpipe_asyncq, h);
		state = link2xnpipe(h, alink);

		if (testbits(state->status, XNPIPE_USER_SIGIO)) {
			__clrbits(state->status, XNPIPE_USER_SIGIO);
			xnlock_put_irqrestore(&nklock, s);
			kill_fasync(&state->asyncq, xnpipe_asyncsig, POLL_IN);
			xnlock_get_irqsave(&nklock, s);
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
			nh = getheadq(&xnpipe_asyncq);
#endif
		}
	}

	xnlock_put_irqrestore(&nklock, s);
}
static inline void xnpipe_schedule_request(void) /* hw IRQs off */
{
	__rthal_apc_schedule(xnpipe_wakeup_apc);
}
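
/*
 * The request does not wake Linux waiters directly: it kicks the
 * "pipe_wakeup" APC allocated in xnpipe_mount(), so that
 * xnpipe_wakeup_proc() later runs wake_up_interruptible() and
 * kill_fasync() from the Linux domain, where those calls are safe.
 */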
static inline ssize_t xnpipe_flush_bufq(void (*fn)(void *buf, void *xstate),
					struct xnqueue *q,
					void *xstate)
{
	struct xnpipe_mh *mh;
	struct xnholder *h;
	ssize_t n = 0;

	/* Queue is private, no locking is required. */
	while ((h = getq(q)) != NULL) {
		mh = link2mh(h);
		n += xnpipe_m_size(mh);
		fn(mh, xstate);
	}

	/* We must return the overall count of bytes flushed. */
	return n;
}
/*
 * Move the specified queue contents to a private queue, then call the
 * flush handler to purge it. The latter is run without locking.
 * Returns the number of bytes flushed. Must be entered with nklock
 * held, interrupts off.
 */
#define xnpipe_flushq(__state, __q, __f, __s)				\
({									\
	struct xnqueue __privq;						\
	ssize_t __n;							\
	initq(&__privq);						\
	moveq(&__privq, &(__state)->__q);				\
	xnlock_put_irqrestore(&nklock, (__s));				\
	__n = xnpipe_flush_bufq((__state)->ops.__f, &__privq, (__state)->xstate); \
	xnlock_get_irqsave(&nklock, (__s));				\
	__n;								\
})
static void *xnpipe_default_alloc_ibuf(size_t size, void *xstate)
{
	void *buf;

	buf = xnmalloc(size);
	if (likely(buf != NULL))
		return buf;

	if (size > xnheap_max_contiguous(&kheap))
		/* Request will never succeed. */
		return (struct xnpipe_mh *)-1;

	return NULL;
}

static void xnpipe_default_free_ibuf(void *buf, void *xstate)
{
	xnfree(buf);
}

static void xnpipe_default_release(void *xstate)
{
}

static inline int xnpipe_set_ops(struct xnpipe_state *state,
				 struct xnpipe_operations *ops)
{
	state->ops = *ops;

	if (ops->free_obuf == NULL)
		/*
		 * Caller must provide a way to free unread outgoing
		 * buffers.
		 */
		return -EINVAL;

	/* Set some default handlers for common usage. */
	if (ops->alloc_ibuf == NULL)
		state->ops.alloc_ibuf = xnpipe_default_alloc_ibuf;
	if (ops->free_ibuf == NULL)
		state->ops.free_ibuf = xnpipe_default_free_ibuf;
	if (ops->release == NULL)
		state->ops.release = xnpipe_default_release;

	return 0;
}
int xnpipe_connect(int minor, struct xnpipe_operations *ops, void *xstate)
{
	struct xnpipe_state *state;
	int need_sched = 0, ret;
	spl_t s;

	minor = xnpipe_minor_alloc(minor);
	if (minor < 0)
		return minor;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	ret = xnpipe_set_ops(state, ops);
	if (ret) {
		xnlock_put_irqrestore(&nklock, s);
		return ret;
	}

	__setbits(state->status, XNPIPE_KERN_CONN);
	xnsynch_init(&state->synchbase, XNSYNCH_FIFO, NULL);
	state->xstate = xstate;
	state->ionrd = 0;

	if (testbits(state->status, XNPIPE_USER_CONN)) {
		if (testbits(state->status, XNPIPE_USER_WREAD)) {
			/*
			 * Wake up the regular Linux task waiting for
			 * the kernel side to connect (xnpipe_open).
			 */
			__setbits(state->status, XNPIPE_USER_WREAD_READY);
			need_sched = 1;
		}

		if (state->asyncq) {	/* Schedule asynch sig. */
			__setbits(state->status, XNPIPE_USER_SIGIO);
			need_sched = 1;
		}
	}

	if (need_sched)
		xnpipe_schedule_request();

	xnlock_put_irqrestore(&nklock, s);

	return minor;
}
EXPORT_SYMBOL_GPL(xnpipe_connect);
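
/*
 * A minimal kernel-side usage sketch (illustrative only; the mypipe_*
 * names are assumptions, not part of this file). Only free_obuf is
 * mandatory; xnpipe_set_ops() fills in defaults for the rest.
 *
 *	static void mypipe_free_obuf(void *buf, void *xstate)
 *	{
 *		xnfree(buf);
 *	}
 *
 *	static struct xnpipe_operations mypipe_ops = {
 *		.free_obuf = mypipe_free_obuf,
 *	};
 *
 *	int minor = xnpipe_connect(XNPIPE_MINOR_AUTO, &mypipe_ops, NULL);
 *	if (minor < 0)
 *		return minor;
 *
 * User space then reaches the message queue through /dev/rtpN, with
 * N == the returned minor (nodes are created as "rtp%d" in
 * xnpipe_mount()).
 */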
int xnpipe_disconnect(int minor)
{
	struct xnpipe_state *state;
	int need_sched = 0;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBADF;
	}

	__clrbits(state->status, XNPIPE_KERN_CONN);

	state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);

	if (!testbits(state->status, XNPIPE_USER_CONN))
		goto cleanup;

	xnpipe_flushq(state, inq, free_ibuf, s);

	if (xnsynch_destroy(&state->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();

	if (testbits(state->status, XNPIPE_USER_WREAD)) {
		/*
		 * Wake up the regular Linux task waiting for some
		 * operation from the Xenomai side (read/write or
		 * poll).
		 */
		__setbits(state->status, XNPIPE_USER_WREAD_READY);
		need_sched = 1;
	}

	if (state->asyncq) {	/* Schedule asynch sig. */
		__setbits(state->status, XNPIPE_USER_SIGIO);
		need_sched = 1;
	}

cleanup:
	/*
	 * If xnpipe_release() has not fully run, enter lingering
	 * close. This will prevent the extra state from being wiped
	 * out until then.
	 */
	if (testbits(state->status, XNPIPE_USER_CONN))
		__setbits(state->status, XNPIPE_KERN_LCLOSE);
	else {
		xnlock_put_irqrestore(&nklock, s);
		state->ops.release(state->xstate);
		xnlock_get_irqsave(&nklock, s);
		xnpipe_minor_free(minor);
	}

	if (need_sched)
		xnpipe_schedule_request();

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
EXPORT_SYMBOL_GPL(xnpipe_disconnect);
ssize_t xnpipe_send(int minor, struct xnpipe_mh *mh, size_t size, int flags)
{
	struct xnpipe_state *state;
	int need_sched = 0;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	if (size <= sizeof(*mh))
		return -EINVAL;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBADF;
	}

	inith(xnpipe_m_link(mh));
	xnpipe_m_size(mh) = size - sizeof(*mh);
	xnpipe_m_rdoff(mh) = 0;
	state->ionrd += xnpipe_m_size(mh);

	if (flags & XNPIPE_URGENT)
		prependq(&state->outq, xnpipe_m_link(mh));
	else
		appendq(&state->outq, xnpipe_m_link(mh));

	if (!testbits(state->status, XNPIPE_USER_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return (ssize_t) size;
	}

	if (testbits(state->status, XNPIPE_USER_WREAD)) {
		/*
		 * Wake up the regular Linux task waiting for input
		 * from the Xenomai side.
		 */
		__setbits(state->status, XNPIPE_USER_WREAD_READY);
		need_sched = 1;
	}

	if (state->asyncq) {	/* Schedule asynch sig. */
		__setbits(state->status, XNPIPE_USER_SIGIO);
		need_sched = 1;
	}

	if (need_sched)
		xnpipe_schedule_request();

	xnlock_put_irqrestore(&nklock, s);

	return (ssize_t) size;
}
EXPORT_SYMBOL_GPL(xnpipe_send);
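
/*
 * Illustrative send path (a sketch, not taken from this file): the
 * message starts with a struct xnpipe_mh header followed by the
 * payload, and "size" covers both.
 *
 *	size_t len = strlen("hello");
 *	struct xnpipe_mh *mh = xnmalloc(sizeof(*mh) + len);
 *	if (mh) {
 *		memcpy(xnpipe_m_data(mh), "hello", len);
 *		xnpipe_send(minor, mh, sizeof(*mh) + len, 0);
 *	}
 *
 * Passing XNPIPE_URGENT in "flags" prepends the message to the output
 * queue instead of appending it. The buffer is eventually handed back
 * to ops.free_obuf() once user space has consumed it (see
 * xnpipe_read() below), so it must be allocated accordingly.
 */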
ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size)
{
	struct xnpipe_state *state;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	if (size < 0)
		return -EINVAL;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBADF;
	}

	xnpipe_m_size(mh) += size;
	state->ionrd += size;

	xnlock_put_irqrestore(&nklock, s);

	return (ssize_t) size;
}
EXPORT_SYMBOL_GPL(xnpipe_mfixup);
ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
{
	struct xnpipe_state *state;
	struct xnholder *h;
	xnthread_t *curr;
	ssize_t ret;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	if (xnpod_asynch_p())
		return -EPERM;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		ret = -EBADF;
		goto unlock_and_exit;
	}

	curr = xnpod_current_thread();

	while ((h = getq(&state->inq)) == NULL) {
		if (timeout == XN_NONBLOCK) {
			ret = -EWOULDBLOCK;
			goto unlock_and_exit;
		}

		xnsynch_sleep_on(&state->synchbase, timeout, XN_RELATIVE);

		if (xnthread_test_info(curr, XNTIMEO)) {
			ret = -ETIMEDOUT;
			goto unlock_and_exit;
		}
		if (xnthread_test_info(curr, XNBREAK)) {
			ret = -EINTR;
			goto unlock_and_exit;
		}
		if (xnthread_test_info(curr, XNRMID)) {
			ret = -EIDRM;
			goto unlock_and_exit;
		}

		/* remaining timeout */
		timeout = xnthread_timeout(curr);
	}

	*pmh = link2mh(h);

	ret = (ssize_t) xnpipe_m_size(*pmh);

	if (testbits(state->status, XNPIPE_USER_WSYNC)) {
		__setbits(state->status, XNPIPE_USER_WSYNC_READY);
		xnpipe_schedule_request();
	}

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
EXPORT_SYMBOL_GPL(xnpipe_recv);
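
/*
 * Illustrative receive path (a sketch; the free step assumes the
 * default alloc_ibuf/free_ibuf handlers based on xnmalloc/xnfree):
 *
 *	struct xnpipe_mh *mh;
 *	ssize_t n = xnpipe_recv(minor, &mh, XN_INFINITE);
 *	if (n >= 0) {
 *		// process n payload bytes at xnpipe_m_data(mh)
 *		xnfree(mh);
 *	}
 *
 * XN_NONBLOCK polls instead of sleeping; a finite timeout yields
 * -ETIMEDOUT on expiry. The call must not be made from interrupt
 * context (see the xnpod_asynch_p() check above).
 */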
int xnpipe_flush(int minor, int mode)
{
	struct xnpipe_state *state;
	int msgcount;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBADF;
	}

	msgcount = countq(&state->outq) + countq(&state->inq);

	if (mode & XNPIPE_OFLUSH)
		state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);

	if (mode & XNPIPE_IFLUSH)
		xnpipe_flushq(state, inq, free_ibuf, s);

	if (testbits(state->status, XNPIPE_USER_WSYNC) &&
	    msgcount > countq(&state->outq) + countq(&state->inq)) {
		__setbits(state->status, XNPIPE_USER_WSYNC_READY);
		xnpipe_schedule_request();
	}

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
EXPORT_SYMBOL_GPL(xnpipe_flush);
/* Must be entered with nklock held, interrupts off. */
#define xnpipe_cleanup_user_conn(__state, __s)				\
do {									\
	xnpipe_flushq((__state), outq, free_obuf, (__s));		\
	xnpipe_flushq((__state), inq, free_ibuf, (__s));		\
	__clrbits((__state)->status, XNPIPE_USER_CONN);			\
	if (testbits((__state)->status, XNPIPE_KERN_LCLOSE)) {		\
		clrbits((__state)->status, XNPIPE_KERN_LCLOSE);		\
		xnlock_put_irqrestore(&nklock, (__s));			\
		(__state)->ops.release((__state)->xstate);		\
		xnlock_get_irqsave(&nklock, (__s));			\
		xnpipe_minor_free(xnminor_from_state(__state));		\
	}								\
} while(0)
/*
 * Open the pipe from user-space.
 */

static int xnpipe_open(struct inode *inode, struct file *file)
{
	int minor, err = 0, sigpending;
	struct xnpipe_state *state;
	spl_t s;

	minor = MINOR(inode->i_rdev);

	if (minor >= XNPIPE_NDEVS)
		return -ENXIO;	/* TssTss... stop playing with mknod() ;o) */

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	/* Enforce exclusive open for the message queues. */
	if (testbits(state->status, XNPIPE_USER_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBUSY;
	}

	__setbits(state->status, XNPIPE_USER_CONN);

	file->private_data = state;
	init_waitqueue_head(&state->readq);
	init_waitqueue_head(&state->syncq);
	state->wcount = 0;

	__clrbits(state->status,
		  XNPIPE_USER_ALL_WAIT | XNPIPE_USER_ALL_READY |
		  XNPIPE_USER_SIGIO);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		if (testbits(file->f_flags, O_NONBLOCK)) {
			xnpipe_cleanup_user_conn(state, s);
			xnlock_put_irqrestore(&nklock, s);
			return -EWOULDBLOCK;
		}

		sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
					 testbits(state->status,
						  XNPIPE_KERN_CONN));
		if (sigpending) {
			xnpipe_cleanup_user_conn(state, s);
			xnlock_put_irqrestore(&nklock, s);
			return -ERESTARTSYS;
		}
	}

	if (err)
		xnpipe_cleanup_user_conn(state, s);

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
static int xnpipe_release(struct inode *inode, struct file *file)
{
	struct xnpipe_state *state = file->private_data;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	xnpipe_dequeue_all(state, XNPIPE_USER_WREAD);
	xnpipe_dequeue_all(state, XNPIPE_USER_WSYNC);

	if (testbits(state->status, XNPIPE_KERN_CONN)) {
		/* Unblock waiters. */
		if (xnsynch_nsleepers(&state->synchbase) > 0) {
			xnsynch_flush(&state->synchbase, XNRMID);
			xnpod_schedule();
		}
	}

	if (state->ops.input)
		state->ops.input(NULL, -EPIPE, state->xstate);

	if (state->asyncq) {	/* Clear the async queue */
		removeq(&xnpipe_asyncq, &state->alink);
		__clrbits(state->status, XNPIPE_USER_SIGIO);
		xnlock_put_irqrestore(&nklock, s);
		fasync_helper(-1, file, 0, &state->asyncq);
		xnlock_get_irqsave(&nklock, s);
	}

	xnpipe_cleanup_user_conn(state, s);
	/*
	 * The extra state may not be available from now on, if
	 * xnpipe_disconnect() entered lingering close before we got
	 * there; so calling xnpipe_cleanup_user_conn() should be the
	 * last thing we do.
	 */
	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
static ssize_t xnpipe_read(struct file *file,
			   char *buf, size_t count, loff_t *ppos)
{
	struct xnpipe_state *state = file->private_data;
	int sigpending, err = 0;
	size_t nbytes, inbytes;
	struct xnpipe_mh *mh;
	struct xnholder *h;
	ssize_t ret;
	spl_t s;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EPIPE;
	}
	/*
	 * Queue probe and proc enqueuing must be seen atomically,
	 * including from the Xenomai side.
	 */
	h = getq(&state->outq);
	mh = link2mh(h);

	if (mh == NULL) {
		if (file->f_flags & O_NONBLOCK) {
			xnlock_put_irqrestore(&nklock, s);
			return -EWOULDBLOCK;
		}

		sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
					 !emptyq_p(&state->outq));
		h = getq(&state->outq);
		mh = link2mh(h);

		if (mh == NULL) {
			xnlock_put_irqrestore(&nklock, s);
			return sigpending ? -ERESTARTSYS : 0;
		}
	}

	/*
	 * We allow more data to be appended to the current message
	 * bucket while its contents are being copied to the user
	 * buffer, therefore we need to loop until: 1) all the data
	 * has been copied, or 2) we consumed the user buffer space
	 * entirely.
	 */

	inbytes = 0;

	for (;;) {
		nbytes = xnpipe_m_size(mh) - xnpipe_m_rdoff(mh);

		if (nbytes + inbytes > count)
			nbytes = count - inbytes;

		if (nbytes == 0)
			break;

		xnlock_put_irqrestore(&nklock, s);
		/* More data could be appended while doing this: */
		err = __copy_to_user(buf + inbytes,
				     xnpipe_m_data(mh) + xnpipe_m_rdoff(mh),
				     nbytes);
		xnlock_get_irqsave(&nklock, s);

		if (err) {
			err = -EFAULT;
			break;
		}

		inbytes += nbytes;
		xnpipe_m_rdoff(mh) += nbytes;
	}

	state->ionrd -= inbytes;
	ret = inbytes;

	if (xnpipe_m_size(mh) > xnpipe_m_rdoff(mh))
		prependq(&state->outq, &mh->link);
	else {
		/*
		 * We always want to fire the output handler because
		 * whatever the error state is for userland (e.g.
		 * -EFAULT), we did pull a message from our output
		 * queue.
		 */
		if (state->ops.output)
			state->ops.output(mh, state->xstate);
		xnlock_put_irqrestore(&nklock, s);
		state->ops.free_obuf(mh, state->xstate);
		xnlock_get_irqsave(&nklock, s);
		if (testbits(state->status, XNPIPE_USER_WSYNC)) {
			__setbits(state->status, XNPIPE_USER_WSYNC_READY);
			xnpipe_schedule_request();
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	return err ? : ret;
}
static ssize_t xnpipe_write(struct file *file,
			    const char *buf, size_t count, loff_t *ppos)
{
	struct xnpipe_state *state = file->private_data;
	struct xnpipe_mh *mh;
	int pollnum, ret;
	spl_t s;

	if (count == 0)
		return 0;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	xnlock_get_irqsave(&nklock, s);

retry:

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EPIPE;
	}

	pollnum = countq(&state->inq) + countq(&state->outq);
	xnlock_put_irqrestore(&nklock, s);

	mh = state->ops.alloc_ibuf(count + sizeof(*mh), state->xstate);
	if (mh == (struct xnpipe_mh *)-1)
		return -ENOMEM;

	if (mh == NULL) {
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		xnlock_get_irqsave(&nklock, s);
		if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
				pollnum >
				countq(&state->inq) + countq(&state->outq))) {
			xnlock_put_irqrestore(&nklock, s);
			return -ERESTARTSYS;
		}
		goto retry;
	}

	inith(xnpipe_m_link(mh));
	xnpipe_m_size(mh) = count;
	xnpipe_m_rdoff(mh) = 0;

	if (copy_from_user(xnpipe_m_data(mh), buf, count)) {
		state->ops.free_ibuf(mh, state->xstate);
		return -EFAULT;
	}

	xnlock_get_irqsave(&nklock, s);

	appendq(&state->inq, &mh->link);

	/* Wake up a Xenomai sleeper if any. */
	if (xnsynch_wakeup_one_sleeper(&state->synchbase))
		xnpod_schedule();

	if (state->ops.input) {
		ret = state->ops.input(mh, 0, state->xstate);
		if (ret)
			count = (size_t)ret;
	}

	if (file->f_flags & O_SYNC) {
		if (!emptyq_p(&state->inq)) {
			if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
					emptyq_p(&state->inq)))
				count = -ERESTARTSYS;
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	return (ssize_t)count;
}
static DECLARE_IOCTL_HANDLER(xnpipe_ioctl, file, cmd, arg)
{
	struct xnpipe_state *state = file->private_data;
	int ret = 0;
	ssize_t n;
	spl_t s;

	switch (cmd) {
	case XNPIPEIOC_GET_NRDEV:

		if (put_user(XNPIPE_NDEVS, (int *)arg))
			return -EFAULT;

		break;

	case XNPIPEIOC_OFLUSH:

		xnlock_get_irqsave(&nklock, s);

		if (!testbits(state->status, XNPIPE_KERN_CONN)) {
			xnlock_put_irqrestore(&nklock, s);
			return -EPIPE;
		}

		n = xnpipe_flushq(state, outq, free_obuf, s);
		state->ionrd -= n;
		goto kick_wsync;

	case XNPIPEIOC_IFLUSH:

		xnlock_get_irqsave(&nklock, s);

		if (!testbits(state->status, XNPIPE_KERN_CONN)) {
			xnlock_put_irqrestore(&nklock, s);
			return -EPIPE;
		}

		n = xnpipe_flushq(state, inq, free_ibuf, s);

	kick_wsync:

		if (n > 0 && testbits(state->status, XNPIPE_USER_WSYNC)) {
			__setbits(state->status, XNPIPE_USER_WSYNC_READY);
			xnpipe_schedule_request();
		}

		xnlock_put_irqrestore(&nklock, s);
		ret = n;
		break;

	case XNPIPEIOC_SETSIG:

		if (arg < 1 || arg >= _NSIG)
			return -EINVAL;

		xnpipe_asyncsig = arg;
		break;

	case FIONREAD:

		n = testbits(state->status,
			     XNPIPE_KERN_CONN) ? state->ionrd : 0;

		if (put_user(n, (int *)arg))
			return -EFAULT;

		break;

	case TCGETS:
		/* For isatty() probing. */
		return -ENOTTY;

	default:

		return -EINVAL;
	}

	return ret;
}
static int xnpipe_fasync(int fd, struct file *file, int on)
{
	struct xnpipe_state *state = file->private_data;
	int ret, queued;
	spl_t s;

	queued = (state->asyncq != NULL);
	ret = fasync_helper(fd, file, on, &state->asyncq);

	if (state->asyncq) {
		if (!queued) {
			xnlock_get_irqsave(&nklock, s);
			appendq(&xnpipe_asyncq, &state->alink);
			xnlock_put_irqrestore(&nklock, s);
		}
	} else if (queued) {
		xnlock_get_irqsave(&nklock, s);
		removeq(&xnpipe_asyncq, &state->alink);
		xnlock_put_irqrestore(&nklock, s);
	}

	return ret;
}
static unsigned xnpipe_poll(struct file *file, poll_table *pt)
{
	struct xnpipe_state *state = file->private_data;
	unsigned r_mask = 0, w_mask = 0;
	spl_t s;

	poll_wait(file, &state->readq, pt);

	xnlock_get_irqsave(&nklock, s);

	if (testbits(state->status, XNPIPE_KERN_CONN))
		w_mask |= (POLLOUT | POLLWRNORM);
	else
		r_mask |= POLLHUP;

	if (!emptyq_p(&state->outq))
		r_mask |= (POLLIN | POLLRDNORM);
	else
		/*
		 * Procs which have issued a timed out poll req will
		 * remain linked to the sleepers queue, and will be
		 * silently unlinked the next time the Xenomai side
		 * kicks xnpipe_wakeup_proc.
		 */
		xnpipe_enqueue_wait(state, XNPIPE_USER_WREAD);

	xnlock_put_irqrestore(&nklock, s);

	/*
	 * A descriptor is always ready for writing with the current
	 * implementation, so there is no need to have/handle the
	 * writeq queue so far.
	 */

	return r_mask | w_mask;
}
static struct file_operations xnpipe_fops = {
	.owner = THIS_MODULE,
	.read = xnpipe_read,
	.write = xnpipe_write,
	.poll = xnpipe_poll,
	.unlocked_ioctl = xnpipe_ioctl,
	.open = xnpipe_open,
	.release = xnpipe_release,
	.fasync = xnpipe_fasync
};
int xnpipe_mount(void)
{
	struct xnpipe_state *state;
	int i;

	for (state = &xnpipe_states[0];
	     state < &xnpipe_states[XNPIPE_NDEVS]; state++) {
		inith(&state->slink);
		inith(&state->alink);
		state->status = 0;
		state->asyncq = NULL;
		initq(&state->inq);
		initq(&state->outq);
	}

	initq(&xnpipe_sleepq);
	initq(&xnpipe_asyncq);

	xnpipe_class = class_create(THIS_MODULE, "rtpipe");
	if (IS_ERR(xnpipe_class)) {
		xnlogerr("error creating rtpipe class, err=%ld.\n",
			 PTR_ERR(xnpipe_class));
		return -EBUSY;
	}

	for (i = 0; i < XNPIPE_NDEVS; i++) {
		DECLARE_DEVHANDLE(cldev);
		cldev = wrap_device_create(xnpipe_class, NULL,
					   MKDEV(XNPIPE_DEV_MAJOR, i),
					   NULL, "rtp%d", i);
		if (IS_ERR(cldev)) {
			xnlogerr("can't add device class, major=%d, minor=%d, err=%ld\n",
				 XNPIPE_DEV_MAJOR, i, PTR_ERR(cldev));
			class_destroy(xnpipe_class);
			return -EBUSY;
		}
	}

	if (register_chrdev(XNPIPE_DEV_MAJOR, "rtpipe", &xnpipe_fops)) {
		xnlogerr("unable to reserve major #%d for message pipe support.\n",
			 XNPIPE_DEV_MAJOR);
		return -EBUSY;
	}

	xnpipe_wakeup_apc =
	    rthal_apc_alloc("pipe_wakeup", &xnpipe_wakeup_proc, NULL);

	return 0;
}
void xnpipe_umount(void)
{
	int i;

	rthal_apc_free(xnpipe_wakeup_apc);
	unregister_chrdev(XNPIPE_DEV_MAJOR, "rtpipe");

	for (i = 0; i < XNPIPE_NDEVS; i++)
		wrap_device_destroy(xnpipe_class, MKDEV(XNPIPE_DEV_MAJOR, i));

	class_destroy(xnpipe_class);
}