/*!\file nucleus/select.c
 * \brief File descriptors events multiplexing.
 * \author Gilles Chanteperdrix
 *
 * Copyright (C) 2008 Efixo <gilles.chanteperdrix@xenomai.org>
 *
 * Xenomai is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
 *
 * Xenomai is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Xenomai; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 *
 * \ingroup select
 */

/*!
 * \ingroup nucleus
 * \defgroup select File descriptors events multiplexing services.
 *
 * File descriptors events multiplexing services.
 *
 * This module implements the services needed for implementing the POSIX
 * "select" service, or any other event multiplexing service.
 *
 * Following the implementation of the POSIX select service, this module
 * defines three types of events:
 * - \a XNSELECT_READ, meaning that a file descriptor is ready for reading;
 * - \a XNSELECT_WRITE, meaning that a file descriptor is ready for writing;
 * - \a XNSELECT_EXCEPT, meaning that a file descriptor received an
 *   exceptional event.
 *
 * It works by defining two structures:
 * - a @a struct @a xnselect structure, which should be added to every file
 *   descriptor for every event type (read, write, or except);
 * - a @a struct @a xnselector structure, the selection structure, passed by
 *   the thread calling the xnselect service, where this service does all its
 *   housekeeping.
 *@{*/

#include <nucleus/heap.h>
#include <nucleus/pod.h>
#include <nucleus/synch.h>
#include <nucleus/select.h>
#include <linux/types.h>
#include <linux/bitops.h>	/* For hweight_long */

static xnqueue_t xnselectors;
static int xnselect_apc;

#define link2binding(baddr, memb) \
	container_of(baddr, struct xnselect_binding, memb)

/**
 * Initialize a @a struct @a xnselect structure.
 *
 * This service must be called to initialize a @a struct @a xnselect structure
 * before it is bound to a selector by means of xnselect_bind().
 *
 * @param select_block pointer to the xnselect structure to be initialized.
 */
void xnselect_init(struct xnselect *select_block)
{
	initq(&select_block->bindings);
}
EXPORT_SYMBOL_GPL(xnselect_init);
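
/*
 * Usage sketch (illustration only, not part of the original code): a driver
 * exposing a selectable file descriptor would typically embed one struct
 * xnselect per event type and initialize them when the descriptor is
 * created. The my_fd structure below is hypothetical.
 *
 *	struct my_fd {
 *		struct xnselect read_block;
 *		struct xnselect write_block;
 *	};
 *
 *	static void my_fd_init(struct my_fd *fd)
 *	{
 *		xnselect_init(&fd->read_block);
 *		xnselect_init(&fd->write_block);
 *	}
 */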
static inline int xnselect_wakeup(struct xnselector *selector)
{
	return xnsynch_flush(&selector->synchbase, 0) == XNSYNCH_RESCHED;
}

/**
 * Bind a file descriptor (represented by its @a xnselect structure) to a
 * selector block.
 *
 * @param select_block pointer to the @a struct @a xnselect to be bound;
 *
 * @param binding pointer to a newly allocated (using xnmalloc) @a struct
 * @a xnselect_binding;
 *
 * @param selector pointer to the selector structure;
 *
 * @param type type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a
 * XNSELECT_EXCEPT);
 *
 * @param index index of the file descriptor (represented by @a select_block)
 * in the bit fields used by the @a selector structure;
 *
 * @param state current state of the file descriptor.
 *
 * @a select_block must have been initialized with xnselect_init(),
 * the @a xnselector structure must have been initialized with
 * xnselector_init(), @a binding may be uninitialized.
 *
 * This service must be called with nklock locked, irqs off. For this reason,
 * the @a binding parameter must have been allocated by the caller outside the
 * locking section.
 *
 * @retval -EINVAL if @a type or @a index is invalid;
 * @retval 0 otherwise.
 */
int xnselect_bind(struct xnselect *select_block,
		  struct xnselect_binding *binding,
		  struct xnselector *selector,
		  unsigned type,
		  unsigned index,
		  unsigned state)
{
	if (type >= XNSELECT_MAX_TYPES || index > __FD_SETSIZE)
		return -EINVAL;

	binding->selector = selector;
	binding->fd = select_block;
	binding->type = type;
	binding->bit_index = index;
	inith(&binding->link);
	inith(&binding->slink);

	appendq(&selector->bindings, &binding->slink);
	appendq(&select_block->bindings, &binding->link);
	__FD_SET__(index, &selector->fds[type].expected);
	if (state) {
		__FD_SET__(index, &selector->fds[type].pending);
		if (xnselect_wakeup(selector))
			xnpod_schedule();
	} else
		__FD_CLR__(index, &selector->fds[type].pending);

	return 0;
}
EXPORT_SYMBOL_GPL(xnselect_bind);
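
/*
 * Usage sketch (illustration only): since xnselect_bind() must run with
 * nklock held and irqs off, the binding has to be allocated beforehand with
 * xnmalloc(). The my_fd, my_selector, fd_index and data_available names are
 * hypothetical, and the error path freeing the binding is only a sketch.
 *
 *	struct xnselect_binding *binding;
 *	spl_t s;
 *	int err;
 *
 *	binding = xnmalloc(sizeof(*binding));
 *	if (binding == NULL)
 *		return -ENOMEM;
 *
 *	xnlock_get_irqsave(&nklock, s);
 *	err = xnselect_bind(&my_fd->read_block, binding, my_selector,
 *			    XNSELECT_READ, fd_index, data_available);
 *	xnlock_put_irqrestore(&nklock, s);
 *
 *	if (err)
 *		xnfree(binding);
 */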
/* Must be called with nklock locked, irqs off. */
int __xnselect_signal(struct xnselect *select_block, unsigned state)
{
	xnholder_t *holder;
	int resched;

	for (resched = 0, holder = getheadq(&select_block->bindings);
	     holder; holder = nextq(&select_block->bindings, holder)) {
		struct xnselect_binding *binding;
		struct xnselector *selector;

		binding = link2binding(holder, link);

		selector = binding->selector;
		if (state) {
			if (!__FD_ISSET__(binding->bit_index,
					  &selector->fds[binding->type].pending)) {
				__FD_SET__(binding->bit_index,
					   &selector->fds[binding->type].pending);
				if (xnselect_wakeup(selector))
					resched = 1;
			}
		} else
			__FD_CLR__(binding->bit_index,
				   &selector->fds[binding->type].pending);
	}

	return resched;
}
EXPORT_SYMBOL_GPL(__xnselect_signal);
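
/*
 * Usage sketch (illustration only): a driver signalling that data became
 * available on a hypothetical my_fd would typically do the following, then
 * clear the state again with __xnselect_signal(&my_fd->read_block, 0) once
 * the data has been consumed.
 *
 *	spl_t s;
 *
 *	xnlock_get_irqsave(&nklock, s);
 *	if (__xnselect_signal(&my_fd->read_block, 1))
 *		xnpod_schedule();
 *	xnlock_put_irqrestore(&nklock, s);
 */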
/**
 * Destroy the @a xnselect structure associated with a file descriptor.
 *
 * Any binding with a @a xnselector block is destroyed.
 *
 * @param select_block pointer to the @a xnselect structure associated with a
 * file descriptor.
 */
void xnselect_destroy(struct xnselect *select_block)
{
	xnholder_t *holder;
	int resched = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	while ((holder = getq(&select_block->bindings))) {
		struct xnselect_binding *binding;
		struct xnselector *selector;

		binding = link2binding(holder, link);
		selector = binding->selector;

		__FD_CLR__(binding->bit_index,
			   &selector->fds[binding->type].expected);
		if (!__FD_ISSET__(binding->bit_index,
				  &selector->fds[binding->type].pending)) {
			__FD_SET__(binding->bit_index,
				   &selector->fds[binding->type].pending);
			if (xnselect_wakeup(selector))
				resched = 1;
		}
		removeq(&selector->bindings, &binding->slink);
		xnlock_put_irqrestore(&nklock, s);

		xnfree(binding);

		xnlock_get_irqsave(&nklock, s);
	}
	if (resched)
		xnpod_schedule();
	xnlock_put_irqrestore(&nklock, s);
}
EXPORT_SYMBOL_GPL(xnselect_destroy);

static unsigned
fd_set_andnot(fd_set *result, fd_set *first, fd_set *second, unsigned n)
{
	unsigned i, not_empty = 0;

	for (i = 0; i < __FDELT__(n); i++)
		if ((result->fds_bits[i] =
		     first->fds_bits[i] & ~(second->fds_bits[i])))
			not_empty = 1;

	if (i < __FDSET_LONGS__
	    && (result->fds_bits[i] =
		first->fds_bits[i] & ~(second->fds_bits[i]) & (__FDMASK__(n) - 1)))
		not_empty = 1;

	return not_empty;
}

static unsigned
fd_set_and(fd_set *result, fd_set *first, fd_set *second, unsigned n)
{
	unsigned i, not_empty = 0;

	for (i = 0; i < __FDELT__(n); i++)
		if ((result->fds_bits[i] =
		     first->fds_bits[i] & second->fds_bits[i]))
			not_empty = 1;

	if (i < __FDSET_LONGS__
	    && (result->fds_bits[i] =
		first->fds_bits[i] & second->fds_bits[i] & (__FDMASK__(n) - 1)))
		not_empty = 1;

	return not_empty;
}

static void fd_set_zeropad(fd_set *set, unsigned n)
{
	unsigned i;

	i = __FDELT__(n);

	if (i < __FDSET_LONGS__)
		set->fds_bits[i] &= (__FDMASK__(n) - 1);

	for (i++; i < __FDSET_LONGS__; i++)
		set->fds_bits[i] = 0;
}

static unsigned fd_set_popcount(fd_set *set, unsigned n)
{
	unsigned count = 0, i;

	for (i = 0; i < __FDELT__(n); i++)
		if (set->fds_bits[i])
			count += hweight_long(set->fds_bits[i]);

	if (i < __FDSET_LONGS__ && (set->fds_bits[i] & (__FDMASK__(n) - 1)))
		count += hweight_long(set->fds_bits[i] & (__FDMASK__(n) - 1));

	return count;
}

/**
 * Initialize a selector structure.
 *
 * @param selector The selector structure to be initialized.
 *
 * @retval 0
 */
int xnselector_init(struct xnselector *selector)
{
	unsigned i;

	xnsynch_init(&selector->synchbase, XNSYNCH_FIFO, NULL);
	for (i = 0; i < XNSELECT_MAX_TYPES; i++) {
		__FD_ZERO__(&selector->fds[i].expected);
		__FD_ZERO__(&selector->fds[i].pending);
	}
	initq(&selector->bindings);
	return 0;
}
EXPORT_SYMBOL_GPL(xnselector_init);
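
/*
 * Usage sketch (illustration only): the thread implementing a select-like
 * service usually allocates the selector dynamically and initializes it once
 * before binding any file descriptor to it.
 *
 *	struct xnselector *selector;
 *
 *	selector = xnmalloc(sizeof(*selector));
 *	if (selector == NULL)
 *		return -ENOMEM;
 *	xnselector_init(selector);
 */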
/**
 * Check the state of a number of file descriptors, wait for a state change if
 * no descriptor is ready.
 *
 * @param selector the selector structure to check for pending events;
 *
 * @param out_fds the set of descriptors with pending events if a strictly
 * positive number is returned, or the set of descriptors not yet bound if
 * -ECHRNG is returned;
 *
 * @param in_fds the set of descriptors whose events should be checked;
 *
 * @param nfds the highest-numbered descriptor in any of the @a in_fds sets,
 * plus 1;
 *
 * @param timeout the timeout, whose meaning depends on @a timeout_mode. Note
 * that xnselect() passes @a timeout and @a timeout_mode unchanged to
 * xnsynch_sleep_on(), so passing a relative value different from XN_INFINITE
 * as a timeout with @a timeout_mode set to XN_RELATIVE will cause a longer
 * sleep than expected if the sleep is interrupted;
 *
 * @param timeout_mode the mode of @a timeout.
 *
 * @retval -EINVAL if @a nfds is negative;
 * @retval -ECHRNG if some of the descriptors passed in @a in_fds have not yet
 * been registered with xnselect_bind(), @a out_fds contains the set of such
 * descriptors;
 * @retval -EINTR if @a xnselect was interrupted while waiting;
 * @retval 0 in case of timeout;
 * @retval the number of file descriptors having received an event.
 */
int xnselect(struct xnselector *selector,
	     fd_set *out_fds[XNSELECT_MAX_TYPES],
	     fd_set *in_fds[XNSELECT_MAX_TYPES],
	     int nfds,
	     xnticks_t timeout, xntmode_t timeout_mode)
{
	unsigned i, not_empty = 0;
	xnthread_t *curr;
	spl_t s;

	if ((unsigned) nfds > __FD_SETSIZE)
		return -EINVAL;

	curr = xnpod_current_thread();

	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
		if (out_fds[i])
			fd_set_zeropad(out_fds[i], nfds);

	xnlock_get_irqsave(&nklock, s);
	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
		if (out_fds[i]
		    && fd_set_andnot(out_fds[i], in_fds[i],
				     &selector->fds[i].expected, nfds))
			not_empty = 1;
	xnlock_put_irqrestore(&nklock, s);

	if (not_empty)
		return -ECHRNG;

	xnlock_get_irqsave(&nklock, s);
	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
		if (out_fds[i]
		    && fd_set_and(out_fds[i], in_fds[i],
				  &selector->fds[i].pending, nfds))
			not_empty = 1;

	while (!not_empty) {
		xnsynch_sleep_on(&selector->synchbase, timeout, timeout_mode);

		for (i = 0; i < XNSELECT_MAX_TYPES; i++)
			if (out_fds[i]
			    && fd_set_and(out_fds[i], in_fds[i],
					  &selector->fds[i].pending, nfds))
				not_empty = 1;

		if (xnthread_test_info(curr, XNBREAK | XNTIMEO))
			break;
	}
	xnlock_put_irqrestore(&nklock, s);

	if (not_empty) {
		unsigned count;

		for (count = 0, i = 0; i < XNSELECT_MAX_TYPES; i++)
			if (out_fds[i])
				count += fd_set_popcount(out_fds[i], nfds);

		return count;
	}

	if (xnthread_test_info(curr, XNBREAK))
		return -EINTR;

	return 0; /* Timeout */
}
EXPORT_SYMBOL_GPL(xnselect);
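
/*
 * Usage sketch (illustration only): a caller builds one fd_set per event
 * type it cares about, leaves the others NULL, and interprets the return
 * value as a select() implementation would. The selector and fd_index names
 * below are hypothetical.
 *
 *	fd_set rfds_in, rfds_out;
 *	fd_set *in_fds[XNSELECT_MAX_TYPES] = { [XNSELECT_READ] = &rfds_in };
 *	fd_set *out_fds[XNSELECT_MAX_TYPES] = { [XNSELECT_READ] = &rfds_out };
 *	int ret;
 *
 *	__FD_ZERO__(&rfds_in);
 *	__FD_SET__(fd_index, &rfds_in);
 *
 *	ret = xnselect(selector, out_fds, in_fds, fd_index + 1,
 *		       XN_INFINITE, XN_RELATIVE);
 *	if (ret > 0 && __FD_ISSET__(fd_index, &rfds_out)) {
 *		// fd_index is ready for reading (hypothetical handling here)
 *	}
 */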
/**
 * Destroy a selector block.
 *
 * All bindings with file descriptors are destroyed.
 *
 * @param selector the selector block to be destroyed.
 */
void xnselector_destroy(struct xnselector *selector)
{
	spl_t s;

	inith(&selector->destroy_link);
	xnlock_get_irqsave(&nklock, s);
	appendq(&xnselectors, &selector->destroy_link);
	__rthal_apc_schedule(xnselect_apc);
	xnlock_put_irqrestore(&nklock, s);
}
EXPORT_SYMBOL_GPL(xnselector_destroy);

static void xnselector_destroy_loop(void *cookie)
{
	struct xnselector *selector;
	xnholder_t *holder;
	int resched;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	while ((holder = getq(&xnselectors))) {
		selector = container_of(holder, struct xnselector, destroy_link);
		while ((holder = getq(&selector->bindings))) {
			struct xnselect_binding *binding;
			struct xnselect *fd;

			binding = link2binding(holder, slink);
			fd = binding->fd;
			removeq(&fd->bindings, &binding->link);
			xnlock_put_irqrestore(&nklock, s);

			xnfree(binding);

			xnlock_get_irqsave(&nklock, s);
		}
		resched =
			xnsynch_destroy(&selector->synchbase) == XNSYNCH_RESCHED;
		xnlock_put_irqrestore(&nklock, s);

		xnfree(selector);
		if (resched)
			xnpod_schedule();

		xnlock_get_irqsave(&nklock, s);
	}
	xnlock_put_irqrestore(&nklock, s);
}

int xnselect_mount(void)
{
	initq(&xnselectors);
	xnselect_apc = rthal_apc_alloc("xnselectors_destroy",
				       xnselector_destroy_loop, NULL);
	if (xnselect_apc < 0)
		return xnselect_apc;

	return 0;
}

int xnselect_umount(void)
{
	rthal_apc_free(xnselect_apc);
	return 0;
}

/*@}*/