libroot_debug: Merge guarded heap into libroot_debug.
[haiku.git] / src / system / kernel / wait_for_objects.cpp
blob4fab084c9c5b6742eff1e7e6dfcc7653e376b175
1 /*
2 * Copyright 2007-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4 * Distributed under the terms of the MIT License.
5 */
7 #include <fs/select_sync_pool.h>
8 #include <wait_for_objects.h>
10 #include <new>
12 #include <poll.h>
13 #include <signal.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <sys/select.h>
18 #include <OS.h>
19 #include <Select.h>
21 #include <AutoDeleter.h>
23 #include <fs/fd.h>
24 #include <port.h>
25 #include <sem.h>
26 #include <syscalls.h>
27 #include <syscall_restart.h>
28 #include <thread.h>
29 #include <tracing.h>
30 #include <util/AutoLock.h>
31 #include <util/DoublyLinkedList.h>
32 #include <vfs.h>
// Debug tracing switch: defining TRACE_WAIT_FOR_OBJECTS routes PRINT()/
// FUNCTION() to dprintf(); otherwise both expand to empty statements.
35 //#define TRACE_WAIT_FOR_OBJECTS
36 #ifdef TRACE_WAIT_FOR_OBJECTS
37 #	define PRINT(x) dprintf x
38 #	define FUNCTION(x) dprintf x
39 #else
40 #	define PRINT(x) ;
41 #	define FUNCTION(x) ;
42 #endif
45 using std::nothrow;
// One entry of a select sync pool: pairs a selectsync object with the set of
// event bits (SELECT_FLAG() values) currently registered for it.
48 struct select_sync_pool_entry
49 	: DoublyLinkedListLinkImpl<select_sync_pool_entry> {
50 	selectsync			*sync;
51 	uint16				events;
54 typedef DoublyLinkedList<select_sync_pool_entry> SelectSyncPoolEntryList;
// A pool groups multiple sync entries so a driver can notify all waiters
// interested in a given event (see notify_select_event_pool()).
56 struct select_sync_pool {
57 	SelectSyncPoolEntryList	entries;
// Dispatch table mapping an object type (B_OBJECT_TYPE_*) to its
// select/deselect implementation. Indexed by the type value in
// common_wait_for_objects(); order must therefore match the
// B_OBJECT_TYPE_* constants listed in the comments below.
61 struct select_ops {
62 	status_t (*select)(int32 object, struct select_info* info, bool kernel);
63 	status_t (*deselect)(int32 object, struct select_info* info, bool kernel);
67 static const select_ops kSelectOps[] = {
68 	// B_OBJECT_TYPE_FD
70 		select_fd,
71 		deselect_fd
74 	// B_OBJECT_TYPE_SEMAPHORE
76 		select_sem,
77 		deselect_sem
80 	// B_OBJECT_TYPE_PORT
82 		select_port,
83 		deselect_port
86 	// B_OBJECT_TYPE_THREAD
88 		select_thread,
89 		deselect_thread
// Number of entries in kSelectOps — used to bounds-check user-supplied types.
93 static const uint32 kSelectOpsCount = sizeof(kSelectOps) / sizeof(select_ops);
97 #if WAIT_FOR_OBJECTS_TRACING
100 namespace WaitForObjectsTracing {
// Base class for select() trace entries: snapshots the read/write/error
// fd_sets into the tracing buffer so they can be dumped later.
103 class SelectTraceEntry : public AbstractTraceEntry {
104 	protected:
105 		SelectTraceEntry(int count, fd_set* readSet, fd_set* writeSet,
106 			fd_set* errorSet)
108 			fReadSet(NULL),
109 			fWriteSet(NULL),
110 			fErrorSet(NULL),
111 			fCount(count)
// Copy only the sets that were actually passed; a single tracing-buffer
// allocation is carved into up to three consecutive set copies.
113 			int sets = (readSet != NULL ? 1 : 0) + (writeSet != NULL ? 1 : 0)
114 				+ (errorSet != NULL ? 1 : 0);
115 			if (sets > 0 && count > 0) {
// Variable-sized sets: only count bits are meaningful, so only
// _howmany(count, NFDBITS) fd_mask words need to be copied per set.
116 				uint32 bytes = _howmany(count, NFDBITS) * sizeof(fd_mask);
117 				uint8* allocated = (uint8*)alloc_tracing_buffer(bytes * sets);
// If the tracing buffer is exhausted the members simply stay NULL.
118 				if (allocated != NULL) {
119 					if (readSet != NULL) {
120 						fReadSet = (fd_set*)allocated;
121 						memcpy(fReadSet, readSet, bytes);
122 						allocated += bytes;
124 					if (writeSet != NULL) {
125 						fWriteSet = (fd_set*)allocated;
126 						memcpy(fWriteSet, writeSet, bytes);
127 						allocated += bytes;
129 					if (errorSet != NULL) {
130 						fErrorSet = (fd_set*)allocated;
131 						memcpy(fErrorSet, errorSet, bytes);
// Dumps "<name>read: <...>, write: <...>, error: <...>" to the trace output.
137 		void AddDump(TraceOutput& out, const char* name)
139 			out.Print(name);
141 			_PrintSet(out, "read", fReadSet);
142 			_PrintSet(out, ", write", fWriteSet);
143 			_PrintSet(out, ", error", fErrorSet);
146 	private:
// Prints the FDs set in the given snapshot as a comma-separated list.
147 		void _PrintSet(TraceOutput& out, const char* name, fd_set* set)
150 			out.Print("%s: <", name);
152 			if (set != NULL) {
153 				bool first = true;
154 				for (int i = 0; i < fCount; i++) {
155 					if (!FD_ISSET(i, set))
156 						continue;
158 					if (first) {
159 						out.Print("%d", i);
160 						first = false;
161 					} else
162 						out.Print(", %d", i);
166 			out.Print(">");
169 	protected:
170 		fd_set*	fReadSet;
171 		fd_set*	fWriteSet;
172 		fd_set*	fErrorSet;
173 		int		fCount;
// Trace entry written when common_select() starts waiting; records the
// requested sets and the (absolute) timeout.
177 class SelectBegin : public SelectTraceEntry {
178 	public:
179 		SelectBegin(int count, fd_set* readSet, fd_set* writeSet,
180 			fd_set* errorSet, bigtime_t timeout)
182 			SelectTraceEntry(count, readSet, writeSet, errorSet),
183 			fTimeout(timeout)
185 			Initialized();
188 		virtual void AddDump(TraceOutput& out)
190 			SelectTraceEntry::AddDump(out, "select begin: ");
191 			out.Print(", timeout: %" B_PRIdBIGTIME, fTimeout);
194 	private:
195 		bigtime_t	fTimeout;
// Trace entry written when common_select() finishes; on success it snapshots
// the result sets (count of 0 is passed on error so nothing is copied).
199 class SelectDone : public SelectTraceEntry {
200 	public:
201 		SelectDone(int count, fd_set* readSet, fd_set* writeSet,
202 			fd_set* errorSet, status_t status)
204 			SelectTraceEntry(status == B_OK ? count : 0, readSet, writeSet,
205 				errorSet),
206 			fStatus(status)
208 			Initialized();
211 		virtual void AddDump(TraceOutput& out)
213 			if (fStatus == B_OK)
214 				SelectTraceEntry::AddDump(out, "select done: ");
215 			else
216 				out.Print("select done: error: %#" B_PRIx32, fStatus);
219 	private:
220 		status_t	fStatus;
// Base class for poll() trace entries: snapshots the FDs that have
// (requested or returned, depending on resultEvents) events set.
224 class PollTraceEntry : public AbstractTraceEntry {
225 	protected:
226 		PollTraceEntry(pollfd* fds, int count, bool resultEvents)
228 			fEntries(NULL),
229 			fCount(0)
231 			if (fds != NULL && count > 0) {
// First pass: count entries with a non-zero (r)events mask, so the
// tracing-buffer allocation is exactly sized.
232 				for (int i = 0; i < count; i++) {
233 					if (resultEvents ? fds[i].revents : fds[i].events)
234 						fCount++;
238 			if (fCount == 0)
239 				return;
241 			fEntries = (FDEntry*)alloc_tracing_buffer(fCount * sizeof(FDEntry));
242 			if (fEntries != NULL) {
// Second pass: fds advances every iteration, i only when an entry is
// recorded; the loop stops after fCount matches (counted above).
// NOTE(review): fds->fd (an int) is narrowed into FDEntry::fd (uint16)
// below — verify FDs cannot exceed 65535 in this build.
243 				for (int i = 0; i < fCount; fds++) {
244 					uint16 events = resultEvents ? fds->revents : fds->events;
245 					if (events != 0) {
246 						fEntries[i].fd = fds->fd;
247 						fEntries[i].events = events;
248 						i++;
// Dumps the recorded entries as "<fd: ev, ev>, <fd: ev>, ...".
254 		void AddDump(TraceOutput& out)
256 			if (fEntries == NULL)
257 				return;
// Mapping of POLLxxx bits to short mnemonic names, NULL-terminated.
259 			static const struct {
260 				const char*	name;
261 				uint16		event;
262 			} kEventNames[] = {
263 				{ "r", POLLIN },
264 				{ "w", POLLOUT },
265 				{ "rb", POLLRDBAND },
266 				{ "wb", POLLWRBAND },
267 				{ "rp", POLLPRI },
268 				{ "err", POLLERR },
269 				{ "hup", POLLHUP },
270 				{ "inv", POLLNVAL },
271 				{ NULL, 0 }
274 			bool firstFD = true;
275 			for (int i = 0; i < fCount; i++) {
276 				if (firstFD) {
277 					out.Print("<%u: ", fEntries[i].fd);
278 					firstFD = false;
279 				} else
280 					out.Print(", <%u: ", fEntries[i].fd);
282 				bool firstEvent = true;
283 				for (int k = 0; kEventNames[k].name != NULL; k++) {
284 					if ((fEntries[i].events & kEventNames[k].event) != 0) {
285 						if (firstEvent) {
286 							out.Print("%s", kEventNames[k].name);
287 							firstEvent = false;
288 						} else
289 							out.Print(", %s", kEventNames[k].name);
293 				out.Print(">");
297 	protected:
298 		struct FDEntry {
299 			uint16	fd;
300 			uint16	events;
303 		FDEntry*	fEntries;
304 		int			fCount;
// Trace entry written when common_poll() starts waiting; records the
// requested events (resultEvents == false) and the timeout.
308 class PollBegin : public PollTraceEntry {
309 	public:
310 		PollBegin(pollfd* fds, int count, bigtime_t timeout)
312 			PollTraceEntry(fds, count, false),
313 			fTimeout(timeout)
315 			Initialized();
318 		virtual void AddDump(TraceOutput& out)
320 			out.Print("poll begin: ");
321 			PollTraceEntry::AddDump(out);
322 			out.Print(", timeout: %" B_PRIdBIGTIME, fTimeout);
325 	private:
326 		bigtime_t	fTimeout;
// Trace entry written when common_poll() finishes; on success it snapshots
// the returned revents (count of 0 is passed on error so nothing is copied).
330 class PollDone : public PollTraceEntry {
331 	public:
332 		PollDone(pollfd* fds, int count, int result)
334 			PollTraceEntry(fds, result >= 0 ? count : 0, true),
335 			fResult(result)
337 			Initialized();
340 		virtual void AddDump(TraceOutput& out)
342 			if (fResult >= 0) {
343 				out.Print("poll done: count: %d: ", fResult);
344 				PollTraceEntry::AddDump(out);
345 			} else
346 				out.Print("poll done: error: %#x", fResult);
349 	private:
350 		int		fResult;
353 } // namespace WaitForObjectsTracing
355 # define T(x) new(std::nothrow) WaitForObjectsTracing::x
357 #else
358 # define T(x)
359 #endif // WAIT_FOR_OBJECTS_TRACING
362 // #pragma mark -
366 	Clears all bits in the fd_set - since we are using variable sized
367 	arrays in the kernel, we can't use the FD_ZERO() macro provided by
368 	sys/select.h for this task.
369 	All other FD_xxx() macros are safe to use, though.
371 static inline void
372 fd_zero(fd_set *set, int numFDs)
// A NULL set is tolerated so callers may pass unused sets through directly.
374 	if (set != NULL)
375 		memset(set, 0, _howmany(numFDs, NFDBITS) * sizeof(fd_mask));
// Allocates and initializes a select_sync with numFDs select_info slots and
// a notification semaphore. On success stores the object (ref_count == 1) in
// _sync and returns B_OK; on failure all partial allocations are released via
// the deleters and an error code (B_NO_MEMORY or the semaphore error) is
// returned. Counterpart: put_select_sync().
379 static status_t
380 create_select_sync(int numFDs, select_sync*& _sync)
382 	// create sync structure
383 	select_sync* sync = new(nothrow) select_sync;
384 	if (sync == NULL)
385 		return B_NO_MEMORY;
386 	ObjectDeleter<select_sync> syncDeleter(sync);
388 	// create info set
389 	sync->set = new(nothrow) select_info[numFDs];
390 	if (sync->set == NULL)
391 		return B_NO_MEMORY;
392 	ArrayDeleter<select_info> setDeleter(sync->set);
394 	// create select event semaphore
395 	sync->sem = create_sem(0, "select");
396 	if (sync->sem < 0)
397 		return sync->sem;
399 	sync->count = numFDs;
400 	sync->ref_count = 1;
// Each slot points back at the sync object so notify_select_events() can
// reach the semaphore through a select_info alone.
402 	for (int i = 0; i < numFDs; i++) {
403 		sync->set[i].next = NULL;
404 		sync->set[i].sync = sync;
// Success: disarm the cleanup helpers and hand ownership to the caller.
407 	setDeleter.Detach();
408 	syncDeleter.Detach();
409 	_sync = sync;
411 	return B_OK;
// Releases one reference to the sync object; when the last reference is
// dropped (atomic_add() returned the previous value 1), the semaphore, the
// info array and the object itself are destroyed.
415 void
416 put_select_sync(select_sync* sync)
418 	FUNCTION(("put_select_sync(%p): -> %ld\n", sync, sync->ref_count - 1));
420 	if (atomic_add(&sync->ref_count, -1) == 1) {
421 		delete_sem(sync->sem);
422 		delete[] sync->set;
423 		delete sync;
// Kernel backend for select(): validates the FDs, registers each selected FD
// via select_fd(), waits on the sync semaphore until an event arrives, the
// (absolute) timeout expires, or a signal interrupts, then deselects and
// rewrites the caller's sets with the events that occurred. Returns the
// number of ready FDs, 0 on timeout, or a negative error (B_INTERRUPTED
// leaves the input sets untouched, per the comment below).
428 static int
429 common_select(int numFDs, fd_set *readSet, fd_set *writeSet, fd_set *errorSet,
430 	bigtime_t timeout, const sigset_t *sigMask, bool kernel)
432 	status_t status = B_OK;
433 	int fd;
435 	FUNCTION(("[%ld] common_select(%d, %p, %p, %p, %lld, %p, %d)\n",
436 		find_thread(NULL), numFDs, readSet, writeSet, errorSet, timeout,
437 		sigMask, kernel));
439 	// check if fds are valid before doing anything
441 	for (fd = 0; fd < numFDs; fd++) {
442 		if (((readSet && FD_ISSET(fd, readSet))
443 			|| (writeSet && FD_ISSET(fd, writeSet))
444 			|| (errorSet && FD_ISSET(fd, errorSet)))
445 			&& !fd_is_valid(fd, kernel))
446 			return B_FILE_ERROR;
449 	// allocate sync object
450 	select_sync* sync;
451 	status = create_select_sync(numFDs, sync);
452 	if (status != B_OK)
453 		return status;
455 	T(SelectBegin(numFDs, readSet, writeSet, errorSet, timeout));
457 	// start selecting file descriptors
459 	for (fd = 0; fd < numFDs; fd++) {
460 		sync->set[fd].selected_events = 0;
461 		sync->set[fd].events = 0;
463 		if (readSet && FD_ISSET(fd, readSet))
464 			sync->set[fd].selected_events = SELECT_FLAG(B_SELECT_READ);
465 		if (writeSet && FD_ISSET(fd, writeSet))
466 			sync->set[fd].selected_events |= SELECT_FLAG(B_SELECT_WRITE);
467 		if (errorSet && FD_ISSET(fd, errorSet))
468 			sync->set[fd].selected_events |= SELECT_FLAG(B_SELECT_ERROR);
470 		if (sync->set[fd].selected_events != 0) {
// NOTE(review): select_fd()'s return value is ignored here; an FD that
// fails to select simply never reports events. Verify this is intended.
471 			select_fd(fd, sync->set + fd, kernel);
472 				// array position is the same as the fd for select()
476 	// set new signal mask
477 	sigset_t oldSigMask;
478 	if (sigMask != NULL)
479 		sigprocmask(SIG_SETMASK, sigMask, &oldSigMask);
481 	// wait for something to happen
// timeout is already absolute here (callers add system_time()), hence
// B_ABSOLUTE_TIMEOUT; a negative timeout means wait forever.
482 	status = acquire_sem_etc(sync->sem, 1,
483 		B_CAN_INTERRUPT | (timeout >= 0 ? B_ABSOLUTE_TIMEOUT : 0), timeout);
485 	// restore the old signal mask
486 	if (sigMask != NULL)
487 		sigprocmask(SIG_SETMASK, &oldSigMask, NULL);
489 	PRINT(("common_select(): acquire_sem_etc() returned: %lx\n", status));
491 	// deselect file descriptors
493 	for (fd = 0; fd < numFDs; fd++)
494 		deselect_fd(fd, sync->set + fd, kernel);
496 	PRINT(("common_select(): events deselected\n"));
498 	// collect the events that have happened in the meantime
500 	int count = 0;
502 	if (status == B_INTERRUPTED) {
503 		// We must not clear the sets in this case, as applications may
504 		// rely on the contents of them.
505 		put_select_sync(sync);
506 		T(SelectDone(numFDs, readSet, writeSet, errorSet, status));
507 		return B_INTERRUPTED;
510 	// Clear sets to store the received events
511 	// (we can't use the macros, because we have variable sized arrays;
512 	// the other FD_xxx() macros are safe, though).
513 	fd_zero(readSet, numFDs);
514 	fd_zero(writeSet, numFDs);
515 	fd_zero(errorSet, numFDs);
517 	if (status == B_OK) {
518 		for (count = 0, fd = 0;fd < numFDs; fd++) {
519 			if (readSet && sync->set[fd].events & SELECT_FLAG(B_SELECT_READ)) {
520 				FD_SET(fd, readSet);
521 				count++;
523 			if (writeSet
524 				&& sync->set[fd].events & SELECT_FLAG(B_SELECT_WRITE)) {
525 				FD_SET(fd, writeSet);
526 				count++;
528 			if (errorSet
529 				&& sync->set[fd].events & SELECT_FLAG(B_SELECT_ERROR)) {
530 				FD_SET(fd, errorSet);
531 				count++;
536 	// B_TIMED_OUT and B_WOULD_BLOCK are supposed to return 0
538 	put_select_sync(sync);
540 	T(SelectDone(numFDs, readSet, writeSet, errorSet, status));
542 	return count;
// Kernel backend for poll(): selects every pollfd with a non-negative fd,
// waits on the sync semaphore (absolute timeout), then collects revents.
// Invalid FDs get POLLNVAL in both sync->set[i].events and revents, and —
// per the 'invalid' flag — cause the wait to be skipped entirely so poll
// returns immediately. Returns the count of FDs with events, 0 on timeout,
// or B_INTERRUPTED.
546 static int
547 common_poll(struct pollfd *fds, nfds_t numFDs, bigtime_t timeout, bool kernel)
549 	// allocate sync object
550 	select_sync* sync;
551 	status_t status = create_select_sync(numFDs, sync);
552 	if (status != B_OK)
553 		return status;
555 	T(PollBegin(fds, numFDs, timeout));
557 	// start polling file descriptors (by selecting them)
559 	bool invalid = false;
560 	for (uint32 i = 0; i < numFDs; i++) {
561 		int fd = fds[i].fd;
563 		// initialize events masks
// POLLNVAL/POLLERR/POLLHUP are always reported, even if not requested,
// matching POSIX poll() semantics.
564 		sync->set[i].selected_events = fds[i].events
565 			| POLLNVAL | POLLERR | POLLHUP;
566 		sync->set[i].events = 0;
567 		fds[i].revents = 0;
// Negative FDs are ignored per POSIX; failed select_fd() marks POLLNVAL.
569 		if (fd >= 0 && select_fd(fd, sync->set + i, kernel) != B_OK) {
570 			sync->set[i].events = POLLNVAL;
571 			fds[i].revents = POLLNVAL;
572 				// indicates that the FD doesn't need to be deselected
573 			invalid = true;
577 	if (!invalid) {
578 		status = acquire_sem_etc(sync->sem, 1,
579 			B_CAN_INTERRUPT | (timeout >= 0 ? B_ABSOLUTE_TIMEOUT : 0), timeout);
582 	// deselect file descriptors
// POLLNVAL in revents marks slots that were never successfully selected.
584 	for (uint32 i = 0; i < numFDs; i++) {
585 		if (fds[i].fd >= 0 && (fds[i].revents & POLLNVAL) == 0)
586 			deselect_fd(fds[i].fd, sync->set + i, kernel);
589 	// collect the events that have happened in the meantime
591 	int count = 0;
592 	switch (status) {
593 		case B_OK:
594 			for (uint32 i = 0; i < numFDs; i++) {
595 				if (fds[i].fd < 0)
596 					continue;
598 				// POLLxxx flags and B_SELECT_xxx flags are compatible
599 				fds[i].revents = sync->set[i].events
600 					& sync->set[i].selected_events;
601 				if (fds[i].revents != 0)
602 					count++;
604 			break;
605 		case B_INTERRUPTED:
606 			count = B_INTERRUPTED;
607 			break;
608 		default:
609 			// B_TIMED_OUT, and B_WOULD_BLOCK
610 			break;
613 	put_select_sync(sync);
615 	T(PollDone(fds, numFDs, count));
617 	return count;
// Kernel backend for wait_for_objects(): like common_poll(), but dispatches
// per-object through kSelectOps according to infos[i].type (FD, semaphore,
// port, thread). Out-of-range types or failed selects set B_EVENT_INVALID
// and skip the wait. Returns the number of objects with events, or the
// negative wait status (B_INTERRUPTED/B_TIMED_OUT/B_WOULD_BLOCK).
621 static ssize_t
622 common_wait_for_objects(object_wait_info* infos, int numInfos, uint32 flags,
623 	bigtime_t timeout, bool kernel)
625 	status_t status = B_OK;
627 	// allocate sync object
628 	select_sync* sync;
629 	status = create_select_sync(numInfos, sync);
630 	if (status != B_OK)
631 		return status;
633 	// start selecting objects
635 	bool invalid = false;
636 	for (int i = 0; i < numInfos; i++) {
637 		uint16 type = infos[i].type;
638 		int32 object = infos[i].object;
640 		// initialize events masks
// Error-ish events are always reported, even if not requested.
641 		sync->set[i].selected_events = infos[i].events
642 			| B_EVENT_INVALID | B_EVENT_ERROR | B_EVENT_DISCONNECTED;
643 		sync->set[i].events = 0;
644 		infos[i].events = 0;
646 		if (type >= kSelectOpsCount
647 			|| kSelectOps[type].select(object, sync->set + i, kernel) != B_OK) {
648 			sync->set[i].events = B_EVENT_INVALID;
649 			infos[i].events = B_EVENT_INVALID;
650 				// indicates that the object doesn't need to be deselected
651 			invalid = true;
// With any invalid object we return immediately instead of waiting.
655 	if (!invalid) {
// Unlike select/poll, the timeout flags are supplied by the caller.
656 		status = acquire_sem_etc(sync->sem, 1, B_CAN_INTERRUPT | flags,
657 			timeout);
660 	// deselect objects
662 	for (int i = 0; i < numInfos; i++) {
663 		uint16 type = infos[i].type;
665 		if (type < kSelectOpsCount && (infos[i].events & B_EVENT_INVALID) == 0)
666 			kSelectOps[type].deselect(infos[i].object, sync->set + i, kernel);
669 	// collect the events that have happened in the meantime
671 	ssize_t count = 0;
672 	if (status == B_OK) {
673 		for (int i = 0; i < numInfos; i++) {
674 			infos[i].events = sync->set[i].events
675 				& sync->set[i].selected_events;
676 			if (infos[i].events != 0)
677 				count++;
679 	} else {
680 		// B_INTERRUPTED, B_TIMED_OUT, and B_WOULD_BLOCK
681 		count = status;
684 	put_select_sync(sync);
686 	return count;
690 // #pragma mark - kernel private
// Records the given events on the select_info and, if any of them were
// actually selected, releases the sync semaphore to wake the waiter.
// Returns B_BAD_VALUE for a NULL/uninitialized info.
// NOTE(review): the FUNCTION() trace dereferences info->sync before the
// NULL checks below — harmless in non-tracing builds (FUNCTION is a no-op),
// but would crash a tracing build on a NULL info; verify.
693 status_t
694 notify_select_events(select_info* info, uint16 events)
696 	FUNCTION(("notify_select_events(%p (%p), 0x%x)\n", info, info->sync,
697 		events));
699 	if (info == NULL
700 		|| info->sync == NULL
701 		|| info->sync->sem < B_OK)
702 		return B_BAD_VALUE;
704 	atomic_or(&info->events, events);
706 	// only wake up the waiting select()/poll() call if the events
707 	// match one of the selected ones
708 	if (info->selected_events & events)
709 		return release_sem_etc(info->sync->sem, 1, B_DO_NOT_RESCHEDULE);
711 	return B_OK;
// Notifies every select_info in the singly-linked list (via ->next) of the
// given events; per-info failures from notify_select_events() are ignored.
715 void
716 notify_select_events_list(select_info* list, uint16 events)
718 	struct select_info* info = list;
719 	while (info != NULL) {
720 		notify_select_events(info, events);
721 		info = info->next;
726 // #pragma mark - public kernel API
// Public kernel API: notify a single selectsync of one event, converting the
// B_SELECT_xxx event index into its SELECT_FLAG() bit mask.
729 status_t
730 notify_select_event(struct selectsync *sync, uint8 event)
732 	return notify_select_events((select_info*)sync, SELECT_FLAG(event));
736 // #pragma mark - private kernel exported API
// Linear search of the pool for the entry belonging to the given sync;
// returns NULL if the sync is not in the pool.
739 static select_sync_pool_entry *
740 find_select_sync_pool_entry(select_sync_pool *pool, selectsync *sync)
742 	for (SelectSyncPoolEntryList::Iterator it = pool->entries.GetIterator();
743 		 it.HasNext();) {
744 		select_sync_pool_entry *entry = it.Next();
745 		if (entry->sync == sync)
746 			return entry;
749 	return NULL;
// Adds the given event flag for sync to the pool, creating a new entry if
// sync is not yet pooled. Returns B_NO_MEMORY if the entry allocation fails.
753 static status_t
754 add_select_sync_pool_entry(select_sync_pool *pool, selectsync *sync,
755 	uint8 event)
757 	// check, whether the entry does already exist
758 	select_sync_pool_entry *entry = find_select_sync_pool_entry(pool, sync);
759 	if (!entry) {
760 		entry = new (std::nothrow) select_sync_pool_entry;
761 		if (!entry)
762 			return B_NO_MEMORY;
764 		entry->sync = sync;
765 		entry->events = 0;
767 		pool->entries.Add(entry);
// Accumulate the event bit on the (new or existing) entry.
770 	entry->events |= SELECT_FLAG(event);
772 	return B_OK;
// Pool-pointer variant: lazily creates the pool on first use, delegates to
// the static add_select_sync_pool_entry(), and destroys an empty pool again
// if the add failed (so *_pool never points at an empty pool).
776 status_t
777 add_select_sync_pool_entry(select_sync_pool **_pool, selectsync *sync,
778 	uint8 event)
780 	// create the pool, if necessary
781 	select_sync_pool *pool = *_pool;
782 	if (!pool) {
783 		pool = new (std::nothrow) select_sync_pool;
784 		if (!pool)
785 			return B_NO_MEMORY;
787 		*_pool = pool;
790 	// add the entry
791 	status_t error = add_select_sync_pool_entry(pool, sync, event);
793 	// cleanup
794 	if (pool->entries.IsEmpty()) {
795 		delete pool;
796 		*_pool = NULL;
799 	return error;
// Clears the given event flag for sync in the pool; entries whose event mask
// becomes empty are removed, and an emptied pool is deleted (with *_pool set
// to NULL). Returns B_ENTRY_NOT_FOUND if the pool or the sync is missing.
803 status_t
804 remove_select_sync_pool_entry(select_sync_pool **_pool, selectsync *sync,
805 	uint8 event)
807 	select_sync_pool *pool = *_pool;
808 	if (!pool)
809 		return B_ENTRY_NOT_FOUND;
811 	// clear the event flag of the concerned entries
812 	bool found = false;
813 	for (SelectSyncPoolEntryList::Iterator it = pool->entries.GetIterator();
814 		 it.HasNext();) {
815 		select_sync_pool_entry *entry = it.Next();
816 		if (entry->sync == sync) {
817 			found = true;
818 			entry->events &= ~SELECT_FLAG(event);
820 			// remove the entry, if no longer needed
821 			if (entry->events == 0) {
822 				it.Remove();
823 				delete entry;
828 	if (!found)
829 		return B_ENTRY_NOT_FOUND;
831 	// delete the pool, if no longer needed
832 	if (pool->entries.IsEmpty()) {
833 		delete pool;
834 		*_pool = NULL;
837 	return B_OK;
// Destroys a pool and all of its entries; a NULL pool is a no-op.
841 void
842 delete_select_sync_pool(select_sync_pool *pool)
844 	if (!pool)
845 		return;
847 	while (select_sync_pool_entry *entry = pool->entries.Head()) {
848 		pool->entries.Remove(entry);
849 		delete entry;
852 	delete pool;
// Notifies every pooled sync that registered interest in the given event;
// a NULL pool is a no-op.
856 void
857 notify_select_event_pool(select_sync_pool *pool, uint8 event)
859 	if (!pool)
860 		return;
862 	FUNCTION(("notify_select_event_pool(%p, %u)\n", pool, event));
864 	for (SelectSyncPoolEntryList::Iterator it = pool->entries.GetIterator();
865 		 it.HasNext();) {
866 		select_sync_pool_entry *entry = it.Next();
867 		if (entry->events & SELECT_FLAG(event))
868 			notify_select_event(entry->sync, event);
873 // #pragma mark - Kernel POSIX layer
// Kernel-internal select(): converts the relative timeout to an absolute one
// (common_select() waits with B_ABSOLUTE_TIMEOUT) and runs with kernel
// rights. Negative timeouts mean wait forever and are passed unchanged.
876 ssize_t
877 _kern_select(int numFDs, fd_set *readSet, fd_set *writeSet, fd_set *errorSet,
878 	bigtime_t timeout, const sigset_t *sigMask)
880 	if (timeout >= 0)
881 		timeout += system_time();
883 	return common_select(numFDs, readSet, writeSet, errorSet, timeout,
884 		sigMask, true);
// Kernel-internal poll(): converts the relative timeout to an absolute one
// and runs with kernel rights.
888 ssize_t
889 _kern_poll(struct pollfd *fds, int numFDs, bigtime_t timeout)
891 	if (timeout >= 0)
892 		timeout += system_time();
894 	return common_poll(fds, numFDs, timeout, true);
// Kernel-internal wait_for_objects(): thin wrapper passing the caller's
// timeout flags straight through, with kernel rights.
898 ssize_t
899 _kern_wait_for_objects(object_wait_info* infos, int numInfos, uint32 flags,
900 	bigtime_t timeout)
902 	return common_wait_for_objects(infos, numInfos, flags, timeout, true);
906 // #pragma mark - User syscalls
// select() syscall: validates user pointers, copies the sets into kernel
// buffers, runs common_select() (userland rights), and copies the result
// sets back. Handles syscall restarting around the timeout.
909 ssize_t
910 _user_select(int numFDs, fd_set *userReadSet, fd_set *userWriteSet,
911 	fd_set *userErrorSet, bigtime_t timeout, const sigset_t *userSigMask)
913 	fd_set *readSet = NULL, *writeSet = NULL, *errorSet = NULL;
914 	uint32 bytes = _howmany(numFDs, NFDBITS) * sizeof(fd_mask);
915 	sigset_t sigMask;
916 	int result;
918 	syscall_restart_handle_timeout_pre(timeout);
920 	if (numFDs < 0)
921 		return B_BAD_VALUE;
923 	if ((userReadSet != NULL && !IS_USER_ADDRESS(userReadSet))
924 		|| (userWriteSet != NULL && !IS_USER_ADDRESS(userWriteSet))
925 		|| (userErrorSet != NULL && !IS_USER_ADDRESS(userErrorSet))
926 		|| (userSigMask != NULL && !IS_USER_ADDRESS(userSigMask)))
927 		return B_BAD_ADDRESS;
929 	// copy parameters
931 	if (userReadSet != NULL) {
932 		readSet = (fd_set *)malloc(bytes);
933 		if (readSet == NULL)
934 			return B_NO_MEMORY;
936 		if (user_memcpy(readSet, userReadSet, bytes) < B_OK) {
937 			result = B_BAD_ADDRESS;
938 			goto err;
942 	if (userWriteSet != NULL) {
943 		writeSet = (fd_set *)malloc(bytes);
944 		if (writeSet == NULL) {
945 			result = B_NO_MEMORY;
946 			goto err;
948 		if (user_memcpy(writeSet, userWriteSet, bytes) < B_OK) {
949 			result = B_BAD_ADDRESS;
950 			goto err;
954 	if (userErrorSet != NULL) {
955 		errorSet = (fd_set *)malloc(bytes);
956 		if (errorSet == NULL) {
957 			result = B_NO_MEMORY;
958 			goto err;
960 		if (user_memcpy(errorSet, userErrorSet, bytes) < B_OK) {
961 			result = B_BAD_ADDRESS;
962 			goto err;
// NOTE(review): the signal mask is copied by direct dereference of the user
// pointer instead of user_memcpy() like the sets above — verify this cannot
// fault the kernel on an unmapped-but-userspace address.
966 	if (userSigMask != NULL)
967 		sigMask = *userSigMask;
969 	result = common_select(numFDs, readSet, writeSet, errorSet, timeout,
970 		userSigMask ? &sigMask : NULL, false);
972 	// copy back results
974 	if (result >= B_OK
975 		&& ((readSet != NULL
976 				&& user_memcpy(userReadSet, readSet, bytes) < B_OK)
977 			|| (writeSet != NULL
978 				&& user_memcpy(userWriteSet, writeSet, bytes) < B_OK)
979 			|| (errorSet != NULL
980 				&& user_memcpy(userErrorSet, errorSet, bytes) < B_OK))) {
981 		result = B_BAD_ADDRESS;
982 	} else
983 		syscall_restart_handle_timeout_post(result, timeout);
// free(NULL) is a no-op, so unconditionally freeing all three is safe.
985 err:
986 	free(readSet);
987 	free(writeSet);
988 	free(errorSet);
990 	return result;
// poll() syscall: copies the pollfd array into a kernel buffer, runs
// common_poll() (userland rights), and copies revents back. numFDs == 0 is
// special-cased as a pure timeout wait. Handles syscall restarting.
994 ssize_t
995 _user_poll(struct pollfd *userfds, int numFDs, bigtime_t timeout)
997 	struct pollfd *fds;
998 	size_t bytes;
999 	int result;
1001 	syscall_restart_handle_timeout_pre(timeout);
1003 	if (numFDs < 0)
1004 		return B_BAD_VALUE;
1006 	if (numFDs == 0) {
1007 		// special case: no FDs
1008 		result = common_poll(NULL, 0, timeout, false);
1009 		return result < 0
1010 			? syscall_restart_handle_timeout_post(result, timeout) : result;
1013 	// copy parameters
1014 	if (userfds == NULL || !IS_USER_ADDRESS(userfds))
1015 		return B_BAD_ADDRESS;
1017 	fds = (struct pollfd *)malloc(bytes = numFDs * sizeof(struct pollfd));
1018 	if (fds == NULL)
1019 		return B_NO_MEMORY;
1021 	if (user_memcpy(fds, userfds, bytes) < B_OK) {
1022 		result = B_BAD_ADDRESS;
1023 		goto err;
1026 	result = common_poll(fds, numFDs, timeout, false);
1028 	// copy back results
// NOTE(review): this copy-back check uses `!= 0` while the copy-in above
// uses `< B_OK` — inconsistent style, though both detect failure here.
// The copy-back overwrites result only if it wasn't already an error.
1029 	if (numFDs > 0 && user_memcpy(userfds, fds, bytes) != 0) {
1030 		if (result >= 0)
1031 			result = B_BAD_ADDRESS;
1032 	} else
1033 		syscall_restart_handle_timeout_post(result, timeout);
1035 err:
1036 	free(fds);
1038 	return result;
// wait_for_objects() syscall: copies the object_wait_info array in, runs
// common_wait_for_objects() (userland rights), and copies the resulting
// events back. numInfos == 0 is special-cased as a pure timeout wait.
// Note the restart helper here takes (flags, timeout) — the flags decide
// whether the timeout is relative or absolute.
1042 ssize_t
1043 _user_wait_for_objects(object_wait_info* userInfos, int numInfos, uint32 flags,
1044 	bigtime_t timeout)
1046 	syscall_restart_handle_timeout_pre(flags, timeout);
1048 	if (numInfos < 0)
1049 		return B_BAD_VALUE;
1051 	if (numInfos == 0) {
1052 		// special case: no infos
1053 		ssize_t result = common_wait_for_objects(NULL, 0, flags, timeout,
1054 			false);
1055 		return result < 0
1056 			? syscall_restart_handle_timeout_post(result, timeout) : result;
1059 	if (userInfos == NULL || !IS_USER_ADDRESS(userInfos))
1060 		return B_BAD_ADDRESS;
1062 	int bytes = sizeof(object_wait_info) * numInfos;
1063 	object_wait_info* infos = (object_wait_info*)malloc(bytes);
1064 	if (infos == NULL)
1065 		return B_NO_MEMORY;
1067 	// copy parameters to kernel space, call the function, and copy the results
1068 	// back
1069 	ssize_t result;
1070 	if (user_memcpy(infos, userInfos, bytes) == B_OK) {
1071 		result = common_wait_for_objects(infos, numInfos, flags, timeout,
1072 			false);
// A copy-back failure only overrides a successful result; errors from the
// wait itself are preserved.
1074 		if (result >= 0 && user_memcpy(userInfos, infos, bytes) != B_OK) {
1075 			result = B_BAD_ADDRESS;
1076 		} else
1077 			syscall_restart_handle_timeout_post(result, timeout);
1078 	} else
1079 		result = B_BAD_ADDRESS;
1081 	free(infos);
1083 	return result;