/*
 * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
 * Copyright (C) 2005 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Xenomai is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA
 * 02139, USA; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Xenomai is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/termios.h>
#include <linux/spinlock.h>
#include <linux/device.h>

#include <asm/uaccess.h>
#include <nucleus/pod.h>
#include <nucleus/heap.h>
#include <nucleus/pipe.h>

static int xnpipe_asyncsig = SIGIO;

struct xnpipe_state xnpipe_states[XNPIPE_NDEVS];

#define XNPIPE_BITMAP_SIZE ((XNPIPE_NDEVS + BITS_PER_LONG - 1) / BITS_PER_LONG)
static unsigned long xnpipe_bitmap[XNPIPE_BITMAP_SIZE];

struct xnqueue xnpipe_sleepq, xnpipe_asyncq;

int xnpipe_wakeup_apc;

static DECLARE_DEVCLASS(xnpipe_class);

/* Allocation of minor values */

static inline int xnpipe_minor_alloc(int minor)
{
        spl_t s;

        if ((minor < 0 && minor != XNPIPE_MINOR_AUTO) || minor >= XNPIPE_NDEVS)
                return -ENODEV;

        xnlock_get_irqsave(&nklock, s);

        if (minor == XNPIPE_MINOR_AUTO)
                minor = find_first_zero_bit(xnpipe_bitmap, XNPIPE_NDEVS);

        if (minor == XNPIPE_NDEVS ||
            testbits(xnpipe_bitmap[minor / BITS_PER_LONG],
                     1UL << (minor % BITS_PER_LONG)))
                minor = -EBUSY;
        else
                __setbits(xnpipe_bitmap[minor / BITS_PER_LONG],
                          1UL << (minor % BITS_PER_LONG));

        xnlock_put_irqrestore(&nklock, s);

        return minor;
}

static inline void xnpipe_minor_free(int minor)
{
        __clrbits(xnpipe_bitmap[minor / BITS_PER_LONG],
                  1UL << (minor % BITS_PER_LONG));
}

static inline void xnpipe_enqueue_wait(struct xnpipe_state *state, int mask)
{
        if (state->wcount != 0x7fffffff && state->wcount++ == 0)
                appendq(&xnpipe_sleepq, &state->slink);

        __setbits(state->status, mask);
}

static inline void xnpipe_dequeue_wait(struct xnpipe_state *state, int mask)
{
        if (testbits(state->status, mask))
                if (--state->wcount == 0) {
                        removeq(&xnpipe_sleepq, &state->slink);
                        __clrbits(state->status, mask);
                }
}

static inline void xnpipe_dequeue_all(struct xnpipe_state *state, int mask)
{
        if (testbits(state->status, mask)) {
                if (state->wcount) {
                        state->wcount = 0;
                        removeq(&xnpipe_sleepq, &state->slink);
                        __clrbits(state->status, mask);
                }
        }
}

/* Must be entered with nklock held, interrupts off. */
#define xnpipe_wait(__state, __mask, __s, __cond)                       \
({                                                                      \
        wait_queue_head_t *__waitq;                                     \
        DEFINE_WAIT(__wait);                                            \
        int __sigpending;                                               \
                                                                        \
        if ((__mask) & XNPIPE_USER_WREAD)                               \
                __waitq = &(__state)->readq;                            \
        else                                                            \
                __waitq = &(__state)->syncq;                            \
                                                                        \
        xnpipe_enqueue_wait(__state, __mask);                           \
        xnlock_put_irqrestore(&nklock, __s);                            \
                                                                        \
        for (;;) {                                                      \
                __sigpending = signal_pending(current);                 \
                if (__sigpending)                                       \
                        break;                                          \
                prepare_to_wait_exclusive(__waitq, &__wait, TASK_INTERRUPTIBLE); \
                if (__cond)                                             \
                        break;                                          \
                schedule();                                             \
        }                                                               \
                                                                        \
        finish_wait(__waitq, &__wait);                                  \
                                                                        \
        /* Restore the interrupt state initially set by the caller. */ \
        xnlock_get_irqsave(&nklock, __s);                               \
        xnpipe_dequeue_wait(__state, __mask);                           \
                                                                        \
        __sigpending;                                                   \
})

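/*
 * Typical caller pattern for xnpipe_wait(), mirroring its use in
 * xnpipe_open() and xnpipe_read() below (illustration only): the
 * macro drops nklock across the schedule() point, re-acquires it
 * before returning, and evaluates to non-zero when a signal is
 * pending, which callers usually map to -ERESTARTSYS.
 *
 *      xnlock_get_irqsave(&nklock, s);
 *      sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
 *                               !emptyq_p(&state->outq));
 *      if (sigpending) {
 *              xnlock_put_irqrestore(&nklock, s);
 *              return -ERESTARTSYS;
 *      }
 */
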
static void xnpipe_wakeup_proc(void *cookie)
{
        struct xnpipe_state *state;
        struct xnholder *h, *nh;
        u_long rbits;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);

        nh = getheadq(&xnpipe_sleepq);
        while ((h = nh) != NULL) {
                nh = nextq(&xnpipe_sleepq, h);
                state = link2xnpipe(h, slink);
                rbits = testbits(state->status, XNPIPE_USER_ALL_READY);
                if (rbits) {
                        __clrbits(state->status, rbits);
                        /*
                         * We could be switched out as a result of
                         * waking up a waiter, so we need the
                         * housekeeping and release the nklock before
                         * calling wake_up_interruptible().
                         */
                        if ((rbits & XNPIPE_USER_WREAD_READY) != 0) {
                                if (waitqueue_active(&state->readq)) {
                                        xnlock_put_irqrestore(&nklock, s);
                                        wake_up_interruptible(&state->readq);
                                        xnlock_get_irqsave(&nklock, s);
                                }
                        }
                        if ((rbits & XNPIPE_USER_WSYNC_READY) != 0) {
                                if (waitqueue_active(&state->syncq)) {
                                        xnlock_put_irqrestore(&nklock, s);
                                        wake_up_interruptible(&state->syncq);
                                        xnlock_get_irqsave(&nklock, s);
                                }
                        }
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
                        /*
                         * Assume a waiter might have entered/left the
                         * queue, so we need to refetch the sleep
                         * queue head to be safe.
                         */
                        nh = getheadq(&xnpipe_sleepq);
#endif
                }
        }

        /*
         * Scan the async queue, sending the proper signal to
         * subscribers.
         */
        nh = getheadq(&xnpipe_asyncq);
        while ((h = nh) != NULL) {
                nh = nextq(&xnpipe_asyncq, h);
                state = link2xnpipe(h, alink);

                if (testbits(state->status, XNPIPE_USER_SIGIO)) {
                        __clrbits(state->status, XNPIPE_USER_SIGIO);
                        xnlock_put_irqrestore(&nklock, s);
                        kill_fasync(&state->asyncq, xnpipe_asyncsig, POLL_IN);
                        xnlock_get_irqsave(&nklock, s);
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
                        nh = getheadq(&xnpipe_asyncq);
#endif
                }
        }

        xnlock_put_irqrestore(&nklock, s);
}

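/*
 * Note: the real-time callers below never invoke wake_up_interruptible()
 * or kill_fasync() directly; they raise the wakeup APC instead, so that
 * xnpipe_wakeup_proc() performs the Linux-side wakeups from the Linux
 * domain, where such calls are legal.
 */
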
static inline void xnpipe_schedule_request(void) /* hw IRQs off */
{
        __rthal_apc_schedule(xnpipe_wakeup_apc);
}

static inline ssize_t xnpipe_flush_bufq(void (*fn)(void *buf, void *xstate),
                                        struct xnqueue *q, void *xstate)
{
        struct xnpipe_mh *mh;
        struct xnholder *h;
        ssize_t n = 0;

        /* Queue is private, no locking is required. */
        while ((h = getq(q)) != NULL) {
                mh = container_of(h, struct xnpipe_mh, link);
                n += xnpipe_m_size(mh);
                fn(mh, xstate);
        }

        /* We must return the overall count of bytes flushed. */
        return n;
}

/*
 * Move the specified queue contents to a private queue, then call the
 * flush handler to purge it. The latter is run without locking.
 * Returns the number of bytes flushed. Must be entered with nklock
 * held, interrupts off.
 */
#define xnpipe_flushq(__state, __q, __f, __s)                           \
({                                                                      \
        struct xnqueue __privq;                                         \
        ssize_t n;                                                      \
                                                                        \
        initq(&__privq);                                                \
        moveq(&__privq, &(__state)->__q);                               \
        xnlock_put_irqrestore(&nklock, (__s));                          \
        n = xnpipe_flush_bufq((__state)->ops.__f, &__privq, (__state)->xstate); \
        xnlock_get_irqsave(&nklock, (__s));                             \
                                                                        \
        n;                                                              \
})

static void *xnpipe_default_alloc_ibuf(size_t size, void *xstate)
{
        void *buf;

        buf = xnmalloc(size);
        if (likely(buf != NULL))
                return buf;

        if (size > xnheap_max_contiguous(&kheap))
                /* Request will never succeed. */
                return (struct xnpipe_mh *)-1;

        return NULL;
}

static void xnpipe_default_free_ibuf(void *buf, void *xstate)
{
        xnfree(buf);
}

static void xnpipe_default_release(void *xstate)
{
}

static inline int xnpipe_set_ops(struct xnpipe_state *state,
                                 struct xnpipe_operations *ops)
{
        state->ops = *ops;

        if (ops->free_obuf == NULL)
                /*
                 * Caller must provide a way to free unread outgoing
                 * buffers.
                 */
                return -EINVAL;

        /* Set some default handlers for common usage. */
        if (ops->alloc_ibuf == NULL)
                state->ops.alloc_ibuf = xnpipe_default_alloc_ibuf;
        if (ops->free_ibuf == NULL)
                state->ops.free_ibuf = xnpipe_default_free_ibuf;
        if (ops->release == NULL)
                state->ops.release = xnpipe_default_release;

        return 0;
}

int xnpipe_connect(int minor, struct xnpipe_operations *ops, void *xstate)
{
        struct xnpipe_state *state;
        int need_sched = 0, ret;
        spl_t s;

        minor = xnpipe_minor_alloc(minor);
        if (minor < 0)
                return minor;

        state = &xnpipe_states[minor];

        xnlock_get_irqsave(&nklock, s);

        ret = xnpipe_set_ops(state, ops);
        if (ret) {
                xnlock_put_irqrestore(&nklock, s);
                return ret;
        }

        __setbits(state->status, XNPIPE_KERN_CONN);
        xnsynch_init(&state->synchbase, XNSYNCH_FIFO, NULL);
        state->xstate = xstate;
        state->ionrd = 0;

        if (testbits(state->status, XNPIPE_USER_CONN)) {
                if (testbits(state->status, XNPIPE_USER_WREAD)) {
                        /*
                         * Wake up the regular Linux task waiting for
                         * the kernel side to connect (xnpipe_open).
                         */
                        __setbits(state->status, XNPIPE_USER_WREAD_READY);
                        need_sched = 1;
                }

                if (state->asyncq) {    /* Schedule asynch sig. */
                        __setbits(state->status, XNPIPE_USER_SIGIO);
                        need_sched = 1;
                }
        }

        if (need_sched)
                xnpipe_schedule_request();

        xnlock_put_irqrestore(&nklock, s);

        return minor;
}
EXPORT_SYMBOL_GPL(xnpipe_connect);

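/*
 * Kernel-side usage sketch (illustration only, not part of this
 * file): a client skin fills a struct xnpipe_operations with at least
 * a free_obuf handler (mandatory, see xnpipe_set_ops()), then binds
 * to an automatic minor. Names such as my_ops, my_free_obuf and
 * my_cookie are hypothetical.
 *
 *      static struct xnpipe_operations my_ops = {
 *              .free_obuf = my_free_obuf,
 *      };
 *
 *      int minor = xnpipe_connect(XNPIPE_MINOR_AUTO, &my_ops, my_cookie);
 *      if (minor < 0)
 *              return minor;
 */
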
int xnpipe_disconnect(int minor)
{
        struct xnpipe_state *state;
        int need_sched = 0;
        spl_t s;

        if (minor < 0 || minor >= XNPIPE_NDEVS)
                return -ENODEV;

        state = &xnpipe_states[minor];

        xnlock_get_irqsave(&nklock, s);

        if (!testbits(state->status, XNPIPE_KERN_CONN)) {
                xnlock_put_irqrestore(&nklock, s);
                return -EBADF;
        }

        __clrbits(state->status, XNPIPE_KERN_CONN);

        state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);

        if (!testbits(state->status, XNPIPE_USER_CONN))
                goto cleanup;

        xnpipe_flushq(state, inq, free_ibuf, s);

        if (xnsynch_destroy(&state->synchbase) == XNSYNCH_RESCHED)
                xnpod_schedule();

        if (testbits(state->status, XNPIPE_USER_WREAD)) {
                /*
                 * Wake up the regular Linux task waiting for some
                 * operation from the Xenomai side (read/write or
                 * poll).
                 */
                __setbits(state->status, XNPIPE_USER_WREAD_READY);
                need_sched = 1;
        }

        if (state->asyncq) {    /* Schedule asynch sig. */
                __setbits(state->status, XNPIPE_USER_SIGIO);
                need_sched = 1;
        }

      cleanup:
        /*
         * If xnpipe_release() has not fully run, enter lingering
         * close. This will prevent the extra state from being wiped
         * out until then.
         */
        if (testbits(state->status, XNPIPE_USER_CONN))
                __setbits(state->status, XNPIPE_KERN_LCLOSE);
        else {
                xnlock_put_irqrestore(&nklock, s);
                state->ops.release(state->xstate);
                xnlock_get_irqsave(&nklock, s);
                xnpipe_minor_free(minor);
        }

        if (need_sched)
                xnpipe_schedule_request();

        xnlock_put_irqrestore(&nklock, s);

        return 0;
}
EXPORT_SYMBOL_GPL(xnpipe_disconnect);

ssize_t xnpipe_send(int minor, struct xnpipe_mh *mh, size_t size, int flags)
{
        struct xnpipe_state *state;
        int need_sched = 0;
        spl_t s;

        if (minor < 0 || minor >= XNPIPE_NDEVS)
                return -ENODEV;

        if (size <= sizeof(*mh))
                return -EINVAL;

        state = &xnpipe_states[minor];

        xnlock_get_irqsave(&nklock, s);

        if (!testbits(state->status, XNPIPE_KERN_CONN)) {
                xnlock_put_irqrestore(&nklock, s);
                return -EBADF;
        }

        inith(xnpipe_m_link(mh));
        xnpipe_m_size(mh) = size - sizeof(*mh);
        xnpipe_m_rdoff(mh) = 0;
        state->ionrd += xnpipe_m_size(mh);

        if (flags & XNPIPE_URGENT)
                prependq(&state->outq, xnpipe_m_link(mh));
        else
                appendq(&state->outq, xnpipe_m_link(mh));

        if (!testbits(state->status, XNPIPE_USER_CONN)) {
                xnlock_put_irqrestore(&nklock, s);
                return (ssize_t) size;
        }

        if (testbits(state->status, XNPIPE_USER_WREAD)) {
                /*
                 * Wake up the regular Linux task waiting for input
                 * from the Xenomai side.
                 */
                __setbits(state->status, XNPIPE_USER_WREAD_READY);
                need_sched = 1;
        }

        if (state->asyncq) {    /* Schedule asynch sig. */
                __setbits(state->status, XNPIPE_USER_SIGIO);
                need_sched = 1;
        }

        if (need_sched)
                xnpipe_schedule_request();

        xnlock_put_irqrestore(&nklock, s);

        return (ssize_t) size;
}
EXPORT_SYMBOL_GPL(xnpipe_send);

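/*
 * Sending sketch (illustration only): a message buffer starts with a
 * struct xnpipe_mh header followed by the payload, so the size passed
 * to xnpipe_send() accounts for both. Names are hypothetical; the
 * buffer is released through ops.free_obuf() once user-space has read
 * it (see xnpipe_read()) or the output queue is flushed.
 *
 *      struct xnpipe_mh *mh = xnmalloc(sizeof(*mh) + len);
 *      memcpy(xnpipe_m_data(mh), payload, len);
 *      ret = xnpipe_send(minor, mh, sizeof(*mh) + len, 0);
 *
 * Passing XNPIPE_URGENT instead of 0 prepends the message to the
 * output queue rather than appending it.
 */
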
ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size)
{
        struct xnpipe_state *state;
        spl_t s;

        if (minor < 0 || minor >= XNPIPE_NDEVS)
                return -ENODEV;

        if (size < 0)
                return -EINVAL;

        state = &xnpipe_states[minor];

        xnlock_get_irqsave(&nklock, s);

        if (!testbits(state->status, XNPIPE_KERN_CONN)) {
                xnlock_put_irqrestore(&nklock, s);
                return -EBADF;
        }

        xnpipe_m_size(mh) += size;
        state->ionrd += size;

        xnlock_put_irqrestore(&nklock, s);

        return (ssize_t) size;
}
EXPORT_SYMBOL_GPL(xnpipe_mfixup);

ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
{
        struct xnpipe_state *state;
        struct xnholder *h;
        xnthread_t *curr;
        ssize_t ret;
        spl_t s;

        if (minor < 0 || minor >= XNPIPE_NDEVS)
                return -ENODEV;

        if (xnpod_asynch_p())
                return -EPERM;

        state = &xnpipe_states[minor];

        xnlock_get_irqsave(&nklock, s);

        if (!testbits(state->status, XNPIPE_KERN_CONN)) {
                ret = -EBADF;
                goto unlock_and_exit;
        }

        curr = xnpod_current_thread();

        while ((h = getq(&state->inq)) == NULL) {
                if (timeout == XN_NONBLOCK) {
                        ret = -EWOULDBLOCK;
                        goto unlock_and_exit;
                }

                xnsynch_sleep_on(&state->synchbase, timeout, XN_RELATIVE);

                if (xnthread_test_info(curr, XNTIMEO)) {
                        ret = -ETIMEDOUT;
                        goto unlock_and_exit;
                }
                if (xnthread_test_info(curr, XNBREAK)) {
                        ret = -EINTR;
                        goto unlock_and_exit;
                }
                if (xnthread_test_info(curr, XNRMID)) {
                        ret = -EIDRM;
                        goto unlock_and_exit;
                }

                /* remaining timeout */
                timeout = xnthread_timeout(curr);
        }

        *pmh = container_of(h, struct xnpipe_mh, link);

        ret = (ssize_t) xnpipe_m_size(*pmh);

        if (testbits(state->status, XNPIPE_USER_WSYNC)) {
                __setbits(state->status, XNPIPE_USER_WSYNC_READY);
                xnpipe_schedule_request();
        }

      unlock_and_exit:

        xnlock_put_irqrestore(&nklock, s);

        return ret;
}
EXPORT_SYMBOL_GPL(xnpipe_recv);

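/*
 * Receiving sketch (illustration only); xnpipe_recv() may sleep on
 * the synch object, so it must run from a Xenomai thread context.
 * With the default ibuf allocator the caller releases the message
 * with xnfree() once processed; process() is hypothetical.
 *
 *      struct xnpipe_mh *mh;
 *      ssize_t n = xnpipe_recv(minor, &mh, XN_INFINITE);
 *      if (n >= 0) {
 *              process(xnpipe_m_data(mh), n);
 *              xnfree(mh);
 *      }
 */
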
int xnpipe_flush(int minor, int mode)
{
        struct xnpipe_state *state;
        int msgcount;
        spl_t s;

        if (minor < 0 || minor >= XNPIPE_NDEVS)
                return -ENODEV;

        state = &xnpipe_states[minor];

        xnlock_get_irqsave(&nklock, s);

        if (!testbits(state->status, XNPIPE_KERN_CONN)) {
                xnlock_put_irqrestore(&nklock, s);
                return -EBADF;
        }

        msgcount = countq(&state->outq) + countq(&state->inq);

        if (mode & XNPIPE_OFLUSH)
                state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);

        if (mode & XNPIPE_IFLUSH)
                xnpipe_flushq(state, inq, free_ibuf, s);

        if (testbits(state->status, XNPIPE_USER_WSYNC) &&
            msgcount > countq(&state->outq) + countq(&state->inq)) {
                __setbits(state->status, XNPIPE_USER_WSYNC_READY);
                xnpipe_schedule_request();
        }

        xnlock_put_irqrestore(&nklock, s);

        return 0;
}
EXPORT_SYMBOL_GPL(xnpipe_flush);

/* Must be entered with nklock held, interrupts off. */
#define xnpipe_cleanup_user_conn(__state, __s)                          \
do {                                                                    \
        xnpipe_flushq((__state), outq, free_obuf, (__s));               \
        xnpipe_flushq((__state), inq, free_ibuf, (__s));                \
        __clrbits((__state)->status, XNPIPE_USER_CONN);                 \
        if (testbits((__state)->status, XNPIPE_KERN_LCLOSE)) {          \
                clrbits((__state)->status, XNPIPE_KERN_LCLOSE);         \
                xnlock_put_irqrestore(&nklock, (__s));                  \
                (__state)->ops.release((__state)->xstate);              \
                xnlock_get_irqsave(&nklock, (__s));                     \
                xnpipe_minor_free(xnminor_from_state(__state));         \
        }                                                               \
} while(0)

/*
 * Open the pipe from user-space.
 */

static int xnpipe_open(struct inode *inode, struct file *file)
{
        int minor, err = 0, sigpending;
        struct xnpipe_state *state;
        spl_t s;

        minor = MINOR(inode->i_rdev);

        if (minor >= XNPIPE_NDEVS)
                return -ENXIO;  /* TssTss... stop playing with mknod() ;o) */

        state = &xnpipe_states[minor];

        xnlock_get_irqsave(&nklock, s);

        /* Enforce exclusive open for the message queues. */
        if (testbits(state->status, XNPIPE_USER_CONN)) {
                xnlock_put_irqrestore(&nklock, s);
                return -EBUSY;
        }

        __setbits(state->status, XNPIPE_USER_CONN);

        file->private_data = state;
        init_waitqueue_head(&state->readq);
        init_waitqueue_head(&state->syncq);
        state->wcount = 0;

        __clrbits(state->status,
                  XNPIPE_USER_ALL_WAIT | XNPIPE_USER_ALL_READY |
                  XNPIPE_USER_SIGIO);

        if (!testbits(state->status, XNPIPE_KERN_CONN)) {
                if (testbits(file->f_flags, O_NONBLOCK)) {
                        xnpipe_cleanup_user_conn(state, s);
                        xnlock_put_irqrestore(&nklock, s);
                        return -EWOULDBLOCK;
                }

                sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
                                         testbits(state->status,
                                                  XNPIPE_KERN_CONN));
                if (sigpending) {
                        xnpipe_cleanup_user_conn(state, s);
                        xnlock_put_irqrestore(&nklock, s);
                        return -ERESTARTSYS;
                }
        }

        if (err)
                xnpipe_cleanup_user_conn(state, s);

        xnlock_put_irqrestore(&nklock, s);

        return err;
}

static int xnpipe_release(struct inode *inode, struct file *file)
{
        struct xnpipe_state *state = file->private_data;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);

        xnpipe_dequeue_all(state, XNPIPE_USER_WREAD);
        xnpipe_dequeue_all(state, XNPIPE_USER_WSYNC);

        if (testbits(state->status, XNPIPE_KERN_CONN)) {
                /* Unblock waiters. */
                if (xnsynch_nsleepers(&state->synchbase) > 0) {
                        xnsynch_flush(&state->synchbase, XNRMID);
                        xnpod_schedule();
                }

                if (state->ops.input)
                        state->ops.input(NULL, -EPIPE, state->xstate);
        }

        if (state->asyncq) {    /* Clear the async queue */
                removeq(&xnpipe_asyncq, &state->alink);
                __clrbits(state->status, XNPIPE_USER_SIGIO);
                xnlock_put_irqrestore(&nklock, s);
                fasync_helper(-1, file, 0, &state->asyncq);
                xnlock_get_irqsave(&nklock, s);
        }

        xnpipe_cleanup_user_conn(state, s);
        /*
         * The extra state may not be available from now on, if
         * xnpipe_disconnect() entered lingering close before we got
         * there; so calling xnpipe_cleanup_user_conn() should be the
         * last thing we do with it.
         */
        xnlock_put_irqrestore(&nklock, s);

        return 0;
}

static ssize_t xnpipe_read(struct file *file,
                           char *buf, size_t count, loff_t *ppos)
{
        struct xnpipe_state *state = file->private_data;
        int sigpending, err = 0;
        size_t nbytes, inbytes;
        struct xnpipe_mh *mh;
        struct xnholder *h;
        spl_t s;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        xnlock_get_irqsave(&nklock, s);

        if (!testbits(state->status, XNPIPE_KERN_CONN)) {
                xnlock_put_irqrestore(&nklock, s);
                return -EPIPE;
        }
        /*
         * Queue probe and proc enqueuing must be seen atomically,
         * including from the Xenomai side.
         */
        h = getq(&state->outq);

        if (h == NULL) {        /* Nothing to read yet? */
                if (file->f_flags & O_NONBLOCK) {
                        xnlock_put_irqrestore(&nklock, s);
                        return -EAGAIN;
                }

                sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
                                         !emptyq_p(&state->outq));
                h = getq(&state->outq);

                if (h == NULL) {        /* Still nothing received? */
                        xnlock_put_irqrestore(&nklock, s);
                        return sigpending ? -ERESTARTSYS : 0;
                }
        }

        mh = container_of(h, struct xnpipe_mh, link);

        /*
         * We allow more data to be appended to the current message
         * bucket while its contents is being copied to the user
         * buffer, therefore, we need to loop until: 1) all the data
         * has been copied, 2) we consumed the user buffer space
         * entirely.
         */

        inbytes = 0;

        for (;;) {
                nbytes = xnpipe_m_size(mh) - xnpipe_m_rdoff(mh);

                if (nbytes + inbytes > count)
                        nbytes = count - inbytes;

                if (nbytes == 0)
                        break;

                xnlock_put_irqrestore(&nklock, s);
                /* More data could be appended while doing this: */
                err = __copy_to_user(buf + inbytes,
                                     xnpipe_m_data(mh) + xnpipe_m_rdoff(mh),
                                     nbytes);
                xnlock_get_irqsave(&nklock, s);

                if (err) {
                        err = -EFAULT;
                        break;
                }

                inbytes += nbytes;
                xnpipe_m_rdoff(mh) += nbytes;
        }

        state->ionrd -= inbytes;

        if (xnpipe_m_size(mh) > xnpipe_m_rdoff(mh))
                prependq(&state->outq, &mh->link);
        else {
                /*
                 * We always want to fire the output handler because
                 * whatever the error state is for userland (e.g.
                 * -EFAULT), we did pull a message from our output
                 * queue.
                 */
                if (state->ops.output)
                        state->ops.output(mh, state->xstate);
                xnlock_put_irqrestore(&nklock, s);
                state->ops.free_obuf(mh, state->xstate);
                xnlock_get_irqsave(&nklock, s);
                if (testbits(state->status, XNPIPE_USER_WSYNC)) {
                        __setbits(state->status, XNPIPE_USER_WSYNC_READY);
                        xnpipe_schedule_request();
                }
        }

        xnlock_put_irqrestore(&nklock, s);

        return err ? : (ssize_t) inbytes;
}

static ssize_t xnpipe_write(struct file *file,
                            const char *buf, size_t count, loff_t *ppos)
{
        struct xnpipe_state *state = file->private_data;
        struct xnpipe_mh *mh;
        int pollnum, ret;
        spl_t s;

        if (count == 0)
                return 0;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;

        xnlock_get_irqsave(&nklock, s);

      retry:

        if (!testbits(state->status, XNPIPE_KERN_CONN)) {
                xnlock_put_irqrestore(&nklock, s);
                return -EPIPE;
        }

        pollnum = countq(&state->inq) + countq(&state->outq);
        xnlock_put_irqrestore(&nklock, s);

        mh = state->ops.alloc_ibuf(count + sizeof(*mh), state->xstate);
        if (mh == (struct xnpipe_mh *)-1)
                return -ENOMEM;

        if (mh == NULL) {
                if (file->f_flags & O_NONBLOCK)
                        return -EWOULDBLOCK;

                xnlock_get_irqsave(&nklock, s);
                if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
                                pollnum >
                                countq(&state->inq) + countq(&state->outq))) {
                        xnlock_put_irqrestore(&nklock, s);
                        return -ERESTARTSYS;
                }
                goto retry;
        }

        inith(xnpipe_m_link(mh));
        xnpipe_m_size(mh) = count;
        xnpipe_m_rdoff(mh) = 0;

        if (copy_from_user(xnpipe_m_data(mh), buf, count)) {
                state->ops.free_ibuf(mh, state->xstate);
                return -EFAULT;
        }

        xnlock_get_irqsave(&nklock, s);

        appendq(&state->inq, &mh->link);

        /* Wake up a Xenomai sleeper if any. */
        if (xnsynch_wakeup_one_sleeper(&state->synchbase))
                xnpod_schedule();

        if (state->ops.input) {
                ret = state->ops.input(mh, 0, state->xstate);
                if (ret)
                        count = (size_t)ret;
        }

        if (file->f_flags & O_SYNC) {
                if (!emptyq_p(&state->inq)) {
                        if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
                                        emptyq_p(&state->inq)))
                                count = -ERESTARTSYS;
                }
        }

        xnlock_put_irqrestore(&nklock, s);

        return (ssize_t)count;
}

static DECLARE_IOCTL_HANDLER(xnpipe_ioctl, file, cmd, arg)
{
        struct xnpipe_state *state = file->private_data;
        int ret = 0;
        ssize_t n;
        spl_t s;

        switch (cmd) {
        case XNPIPEIOC_GET_NRDEV:

                if (put_user(XNPIPE_NDEVS, (int *)arg))
                        return -EFAULT;

                break;

        case XNPIPEIOC_OFLUSH:

                xnlock_get_irqsave(&nklock, s);

                if (!testbits(state->status, XNPIPE_KERN_CONN)) {
                        xnlock_put_irqrestore(&nklock, s);
                        return -EPIPE;
                }

                n = xnpipe_flushq(state, outq, free_obuf, s);
                goto kick_wsync;

        case XNPIPEIOC_IFLUSH:

                xnlock_get_irqsave(&nklock, s);

                if (!testbits(state->status, XNPIPE_KERN_CONN)) {
                        xnlock_put_irqrestore(&nklock, s);
                        return -EPIPE;
                }

                n = xnpipe_flushq(state, inq, free_ibuf, s);

        kick_wsync:

                if (n > 0 && testbits(state->status, XNPIPE_USER_WSYNC)) {
                        __setbits(state->status, XNPIPE_USER_WSYNC_READY);
                        xnpipe_schedule_request();
                }

                xnlock_put_irqrestore(&nklock, s);
                break;

        case XNPIPEIOC_SETSIG:

                if (arg < 1 || arg >= _NSIG)
                        return -EINVAL;

                xnpipe_asyncsig = arg;
                break;

        case FIONREAD:

                n = testbits(state->status,
                             XNPIPE_KERN_CONN) ? state->ionrd : 0;

                if (put_user(n, (int *)arg))
                        return -EFAULT;

                break;

        case TCGETS:
                /* For isatty() probing. */
                return -ENOTTY;

        default:

                return -EINVAL;
        }

        return ret;
}

static int xnpipe_fasync(int fd, struct file *file, int on)
{
        struct xnpipe_state *state = file->private_data;
        int ret, queued;
        spl_t s;

        queued = (state->asyncq != NULL);
        ret = fasync_helper(fd, file, on, &state->asyncq);

        if (state->asyncq) {
                if (!queued) {
                        xnlock_get_irqsave(&nklock, s);
                        appendq(&xnpipe_asyncq, &state->alink);
                        xnlock_put_irqrestore(&nklock, s);
                }
        } else if (queued) {
                xnlock_get_irqsave(&nklock, s);
                removeq(&xnpipe_asyncq, &state->alink);
                xnlock_put_irqrestore(&nklock, s);
        }

        return ret;
}

static unsigned xnpipe_poll(struct file *file, poll_table *pt)
{
        struct xnpipe_state *state = file->private_data;
        unsigned r_mask = 0, w_mask = 0;
        spl_t s;

        poll_wait(file, &state->readq, pt);

        xnlock_get_irqsave(&nklock, s);

        if (testbits(state->status, XNPIPE_KERN_CONN))
                w_mask |= (POLLOUT | POLLWRNORM);

        if (!emptyq_p(&state->outq))
                r_mask |= (POLLIN | POLLRDNORM);
        else
                /*
                 * Procs which have issued a timed out poll req will
                 * remain linked to the sleepers queue, and will be
                 * silently unlinked the next time the Xenomai side
                 * kicks xnpipe_wakeup_proc.
                 */
                xnpipe_enqueue_wait(state, XNPIPE_USER_WREAD);

        xnlock_put_irqrestore(&nklock, s);

        /*
         * A descriptor is always ready for writing with the current
         * implementation, so there is no need to have/handle the
         * writeq queue so far.
         */

        return r_mask | w_mask;
}

static struct file_operations xnpipe_fops = {
        .owner = THIS_MODULE,
        .read = xnpipe_read,
        .write = xnpipe_write,
        .poll = xnpipe_poll,
        .unlocked_ioctl = xnpipe_ioctl,
        .open = xnpipe_open,
        .release = xnpipe_release,
        .fasync = xnpipe_fasync
};

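/*
 * User-space counterpart sketch (illustration only): each connected
 * minor is reachable as a character device node of the "rtpipe" class
 * created in xnpipe_mount(), typically /dev/rtpN, and is driven with
 * the plain file API handled by xnpipe_fops above:
 *
 *      int fd = open("/dev/rtp0", O_RDWR);
 *      write(fd, req, sizeof(req));    // queues a message for xnpipe_recv()
 *      read(fd, buf, sizeof(buf));     // fetches a message sent by xnpipe_send()
 *      close(fd);
 *
 * O_NONBLOCK, poll()/select() and SIGIO delivery through fasync() are
 * supported by the handlers above as well.
 */
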
int xnpipe_mount(void)
{
        struct xnpipe_state *state;
        int i;

        for (state = &xnpipe_states[0];
             state < &xnpipe_states[XNPIPE_NDEVS]; state++) {
                inith(&state->slink);
                inith(&state->alink);
                state->status = 0;
                state->asyncq = NULL;
                initq(&state->inq);
                initq(&state->outq);
        }

        initq(&xnpipe_sleepq);
        initq(&xnpipe_asyncq);

        xnpipe_class = class_create(THIS_MODULE, "rtpipe");
        if (IS_ERR(xnpipe_class)) {
                xnlogerr("error creating rtpipe class, err=%ld.\n",
                         PTR_ERR(xnpipe_class));
                return -EBUSY;
        }

        for (i = 0; i < XNPIPE_NDEVS; i++) {
                DECLARE_DEVHANDLE(cldev);
                cldev = wrap_device_create(xnpipe_class, NULL,
                                           MKDEV(XNPIPE_DEV_MAJOR, i),
                                           NULL, "rtp%d", i);
                if (IS_ERR(cldev)) {
                        xnlogerr
                            ("can't add device class, major=%d, minor=%d, err=%ld\n",
                             XNPIPE_DEV_MAJOR, i, PTR_ERR(cldev));
                        class_destroy(xnpipe_class);
                        return -EBUSY;
                }
        }

        if (register_chrdev(XNPIPE_DEV_MAJOR, "rtpipe", &xnpipe_fops)) {
                xnlogerr
                    ("unable to reserve major #%d for message pipe support.\n",
                     XNPIPE_DEV_MAJOR);
                return -EBUSY;
        }

        xnpipe_wakeup_apc =
            rthal_apc_alloc("pipe_wakeup", &xnpipe_wakeup_proc, NULL);

        return 0;
}

void xnpipe_umount(void)
{
        int i;

        rthal_apc_free(xnpipe_wakeup_apc);

        unregister_chrdev(XNPIPE_DEV_MAJOR, "rtpipe");

        for (i = 0; i < XNPIPE_NDEVS; i++)
                wrap_device_destroy(xnpipe_class, MKDEV(XNPIPE_DEV_MAJOR, i));

        class_destroy(xnpipe_class);
}