2 * Copyright 2007-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4 * Distributed under the terms of the MIT License.
7 #include <fs/select_sync_pool.h>
8 #include <wait_for_objects.h>
16 #include <sys/select.h>
21 #include <AutoDeleter.h>
27 #include <syscall_restart.h>
30 #include <util/AutoLock.h>
31 #include <util/DoublyLinkedList.h>
35 //#define TRACE_WAIT_FOR_OBJECTS
36 #ifdef TRACE_WAIT_FOR_OBJECTS
37 # define PRINT(x) dprintf x
38 # define FUNCTION(x) dprintf x
41 # define FUNCTION(x) ;
48 struct select_sync_pool_entry
49 : DoublyLinkedListLinkImpl
<select_sync_pool_entry
> {
54 typedef DoublyLinkedList
<select_sync_pool_entry
> SelectSyncPoolEntryList
;
56 struct select_sync_pool
{
57 SelectSyncPoolEntryList entries
;
62 status_t (*select
)(int32 object
, struct select_info
* info
, bool kernel
);
63 status_t (*deselect
)(int32 object
, struct select_info
* info
, bool kernel
);
// Hook table indexed by object type (B_OBJECT_TYPE_*).
// NOTE(review): the individual table entries are not visible in this chunk;
// only two of the type markers survive below.
67 static const select_ops kSelectOps
[] = {
74 // B_OBJECT_TYPE_SEMAPHORE
86 // B_OBJECT_TYPE_THREAD
// Number of entries in the hook table; types >= this count are invalid.
93 static const uint32 kSelectOpsCount
= sizeof(kSelectOps
) / sizeof(select_ops
);
97 #if WAIT_FOR_OBJECTS_TRACING
100 namespace WaitForObjectsTracing
{
// Trace entry capturing the three fd_sets passed to select(); copies the
// live bits into the tracing buffer so they can be dumped later.
103 class SelectTraceEntry
: public AbstractTraceEntry
{
105 SelectTraceEntry(int count
, fd_set
* readSet
, fd_set
* writeSet
,
// Count how many of the three sets were actually supplied.
113 int sets
= (readSet
!= NULL
? 1 : 0) + (writeSet
!= NULL
? 1 : 0)
114 + (errorSet
!= NULL
? 1 : 0);
115 if (sets
> 0 && count
> 0) {
// One tracing-buffer allocation holds all supplied sets back to back.
116 uint32 bytes
= _howmany(count
, NFDBITS
) * sizeof(fd_mask
);
117 uint8
* allocated
= (uint8
*)alloc_tracing_buffer(bytes
* sets
);
118 if (allocated
!= NULL
) {
119 if (readSet
!= NULL
) {
120 fReadSet
= (fd_set
*)allocated
;
121 memcpy(fReadSet
, readSet
, bytes
);
// NOTE(review): the advance of `allocated` between the copies is not
// visible in this chunk — presumably `allocated += bytes;` — confirm.
124 if (writeSet
!= NULL
) {
125 fWriteSet
= (fd_set
*)allocated
;
126 memcpy(fWriteSet
, writeSet
, bytes
);
129 if (errorSet
!= NULL
) {
130 fErrorSet
= (fd_set
*)allocated
;
131 memcpy(fErrorSet
, errorSet
, bytes
);
// Dumps the captured read/write/error sets, prefixed by \a name.
137 void AddDump(TraceOutput
& out
, const char* name
)
141 _PrintSet(out
, "read", fReadSet
);
142 _PrintSet(out
, ", write", fWriteSet
);
143 _PrintSet(out
, ", error", fErrorSet
);
// Prints one captured fd_set as "<name>: <fd, fd, ...>", skipping fds whose
// bit is not set.
147 void _PrintSet(TraceOutput
& out
, const char* name
, fd_set
* set
)
150 out
.Print("%s: <", name
);
154 for (int i
= 0; i
< fCount
; i
++) {
155 if (!FD_ISSET(i
, set
))
162 out
.Print(", %d", i
);
// Trace entry logged when a select() call starts; also records the timeout.
177 class SelectBegin
: public SelectTraceEntry
{
179 SelectBegin(int count
, fd_set
* readSet
, fd_set
* writeSet
,
180 fd_set
* errorSet
, bigtime_t timeout
)
182 SelectTraceEntry(count
, readSet
, writeSet
, errorSet
),
188 virtual void AddDump(TraceOutput
& out
)
190 SelectTraceEntry::AddDump(out
, "select begin: ");
191 out
.Print(", timeout: %" B_PRIdBIGTIME
, fTimeout
);
// Trace entry logged when a select() call completes; when the call failed
// the sets are not captured (count is forced to 0 for the base class).
199 class SelectDone
: public SelectTraceEntry
{
201 SelectDone(int count
, fd_set
* readSet
, fd_set
* writeSet
,
202 fd_set
* errorSet
, status_t status
)
204 SelectTraceEntry(status
== B_OK
? count
: 0, readSet
, writeSet
,
211 virtual void AddDump(TraceOutput
& out
)
214 SelectTraceEntry::AddDump(out
, "select done: ");
216 out
.Print("select done: error: %#" B_PRIx32
, fStatus
);
// Trace entry capturing a poll() fd array: stores only the fds that carry
// events, as compact FDEntry records in the tracing buffer.
224 class PollTraceEntry
: public AbstractTraceEntry
{
226 PollTraceEntry(pollfd
* fds
, int count
, bool resultEvents
)
// First pass: count the fds that carry any events (revents when
// resultEvents is true, events otherwise).
231 if (fds
!= NULL
&& count
> 0) {
232 for (int i
= 0; i
< count
; i
++) {
233 if (resultEvents
? fds
[i
].revents
: fds
[i
].events
)
// Second pass: copy fd/event pairs into the tracing buffer.
241 fEntries
= (FDEntry
*)alloc_tracing_buffer(fCount
* sizeof(FDEntry
));
242 if (fEntries
!= NULL
) {
// NOTE(review): the increment clause advances `fds`; `i` is presumably
// advanced inside the body only for entries with events — confirm.
243 for (int i
= 0; i
< fCount
; fds
++) {
244 uint16 events
= resultEvents
? fds
->revents
: fds
->events
;
246 fEntries
[i
].fd
= fds
->fd
;
247 fEntries
[i
].events
= events
;
// Dumps the captured entries as "<fd: event names>" pairs.
254 void AddDump(TraceOutput
& out
)
256 if (fEntries
== NULL
)
// Table mapping POLLxxx flags to short names for the dump.
259 static const struct {
265 { "rb", POLLRDBAND
},
266 { "wb", POLLWRBAND
},
275 for (int i
= 0; i
< fCount
; i
++) {
277 out
.Print("<%u: ", fEntries
[i
].fd
);
280 out
.Print(", <%u: ", fEntries
[i
].fd
);
// Print the symbolic names of all flags set on this entry, comma-separated.
282 bool firstEvent
= true;
283 for (int k
= 0; kEventNames
[k
].name
!= NULL
; k
++) {
284 if ((fEntries
[i
].events
& kEventNames
[k
].event
) != 0) {
286 out
.Print("%s", kEventNames
[k
].name
);
289 out
.Print(", %s", kEventNames
[k
].name
);
// Trace entry logged when a poll() call starts (captures requested events).
308 class PollBegin
: public PollTraceEntry
{
310 PollBegin(pollfd
* fds
, int count
, bigtime_t timeout
)
312 PollTraceEntry(fds
, count
, false),
318 virtual void AddDump(TraceOutput
& out
)
320 out
.Print("poll begin: ");
321 PollTraceEntry::AddDump(out
);
322 out
.Print(", timeout: %" B_PRIdBIGTIME
, fTimeout
);
// Trace entry logged when a poll() call completes (captures result events;
// when the call failed no entries are captured).
330 class PollDone
: public PollTraceEntry
{
332 PollDone(pollfd
* fds
, int count
, int result
)
334 PollTraceEntry(fds
, result
>= 0 ? count
: 0, true),
340 virtual void AddDump(TraceOutput
& out
)
343 out
.Print("poll done: count: %d: ", fResult
);
344 PollTraceEntry::AddDump(out
);
346 out
.Print("poll done: error: %#x", fResult
);
353 } // namespace WaitForObjectsTracing
355 # define T(x) new(std::nothrow) WaitForObjectsTracing::x
359 #endif // WAIT_FOR_OBJECTS_TRACING
366 Clears all bits in the fd_set - since we are using variable sized
367 arrays in the kernel, we can't use the FD_ZERO() macro provided by
368 sys/select.h for this task.
369 All other FD_xxx() macros are safe to use, though.
372 fd_zero(fd_set
*set
, int numFDs
)
375 memset(set
, 0, _howmany(numFDs
, NFDBITS
) * sizeof(fd_mask
));
380 create_select_sync(int numFDs
, select_sync
*& _sync
)
382 // create sync structure
383 select_sync
* sync
= new(nothrow
) select_sync
;
386 ObjectDeleter
<select_sync
> syncDeleter(sync
);
389 sync
->set
= new(nothrow
) select_info
[numFDs
];
390 if (sync
->set
== NULL
)
392 ArrayDeleter
<select_info
> setDeleter(sync
->set
);
394 // create select event semaphore
395 sync
->sem
= create_sem(0, "select");
399 sync
->count
= numFDs
;
402 for (int i
= 0; i
< numFDs
; i
++) {
403 sync
->set
[i
].next
= NULL
;
404 sync
->set
[i
].sync
= sync
;
408 syncDeleter
.Detach();
416 put_select_sync(select_sync
* sync
)
418 FUNCTION(("put_select_sync(%p): -> %ld\n", sync
, sync
->ref_count
- 1));
420 if (atomic_add(&sync
->ref_count
, -1) == 1) {
421 delete_sem(sync
->sem
);
429 common_select(int numFDs
, fd_set
*readSet
, fd_set
*writeSet
, fd_set
*errorSet
,
430 bigtime_t timeout
, const sigset_t
*sigMask
, bool kernel
)
432 status_t status
= B_OK
;
435 FUNCTION(("[%ld] common_select(%d, %p, %p, %p, %lld, %p, %d)\n",
436 find_thread(NULL
), numFDs
, readSet
, writeSet
, errorSet
, timeout
,
439 // check if fds are valid before doing anything
441 for (fd
= 0; fd
< numFDs
; fd
++) {
442 if (((readSet
&& FD_ISSET(fd
, readSet
))
443 || (writeSet
&& FD_ISSET(fd
, writeSet
))
444 || (errorSet
&& FD_ISSET(fd
, errorSet
)))
445 && !fd_is_valid(fd
, kernel
))
449 // allocate sync object
451 status
= create_select_sync(numFDs
, sync
);
455 T(SelectBegin(numFDs
, readSet
, writeSet
, errorSet
, timeout
));
457 // start selecting file descriptors
459 for (fd
= 0; fd
< numFDs
; fd
++) {
460 sync
->set
[fd
].selected_events
= 0;
461 sync
->set
[fd
].events
= 0;
463 if (readSet
&& FD_ISSET(fd
, readSet
))
464 sync
->set
[fd
].selected_events
= SELECT_FLAG(B_SELECT_READ
);
465 if (writeSet
&& FD_ISSET(fd
, writeSet
))
466 sync
->set
[fd
].selected_events
|= SELECT_FLAG(B_SELECT_WRITE
);
467 if (errorSet
&& FD_ISSET(fd
, errorSet
))
468 sync
->set
[fd
].selected_events
|= SELECT_FLAG(B_SELECT_ERROR
);
470 if (sync
->set
[fd
].selected_events
!= 0) {
471 select_fd(fd
, sync
->set
+ fd
, kernel
);
472 // array position is the same as the fd for select()
476 // set new signal mask
479 sigprocmask(SIG_SETMASK
, sigMask
, &oldSigMask
);
481 // wait for something to happen
482 status
= acquire_sem_etc(sync
->sem
, 1,
483 B_CAN_INTERRUPT
| (timeout
>= 0 ? B_ABSOLUTE_TIMEOUT
: 0), timeout
);
485 // restore the old signal mask
487 sigprocmask(SIG_SETMASK
, &oldSigMask
, NULL
);
489 PRINT(("common_select(): acquire_sem_etc() returned: %lx\n", status
));
491 // deselect file descriptors
493 for (fd
= 0; fd
< numFDs
; fd
++)
494 deselect_fd(fd
, sync
->set
+ fd
, kernel
);
496 PRINT(("common_select(): events deselected\n"));
498 // collect the events that have happened in the meantime
502 if (status
== B_INTERRUPTED
) {
503 // We must not clear the sets in this case, as applications may
504 // rely on the contents of them.
505 put_select_sync(sync
);
506 T(SelectDone(numFDs
, readSet
, writeSet
, errorSet
, status
));
507 return B_INTERRUPTED
;
510 // Clear sets to store the received events
511 // (we can't use the macros, because we have variable sized arrays;
512 // the other FD_xxx() macros are safe, though).
513 fd_zero(readSet
, numFDs
);
514 fd_zero(writeSet
, numFDs
);
515 fd_zero(errorSet
, numFDs
);
517 if (status
== B_OK
) {
518 for (count
= 0, fd
= 0;fd
< numFDs
; fd
++) {
519 if (readSet
&& sync
->set
[fd
].events
& SELECT_FLAG(B_SELECT_READ
)) {
524 && sync
->set
[fd
].events
& SELECT_FLAG(B_SELECT_WRITE
)) {
525 FD_SET(fd
, writeSet
);
529 && sync
->set
[fd
].events
& SELECT_FLAG(B_SELECT_ERROR
)) {
530 FD_SET(fd
, errorSet
);
536 // B_TIMED_OUT and B_WOULD_BLOCK are supposed to return 0
538 put_select_sync(sync
);
540 T(SelectDone(numFDs
, readSet
, writeSet
, errorSet
, status
));
547 common_poll(struct pollfd
*fds
, nfds_t numFDs
, bigtime_t timeout
, bool kernel
)
549 // allocate sync object
551 status_t status
= create_select_sync(numFDs
, sync
);
555 T(PollBegin(fds
, numFDs
, timeout
));
557 // start polling file descriptors (by selecting them)
559 bool invalid
= false;
560 for (uint32 i
= 0; i
< numFDs
; i
++) {
563 // initialize events masks
564 sync
->set
[i
].selected_events
= fds
[i
].events
565 | POLLNVAL
| POLLERR
| POLLHUP
;
566 sync
->set
[i
].events
= 0;
569 if (fd
>= 0 && select_fd(fd
, sync
->set
+ i
, kernel
) != B_OK
) {
570 sync
->set
[i
].events
= POLLNVAL
;
571 fds
[i
].revents
= POLLNVAL
;
572 // indicates that the FD doesn't need to be deselected
578 status
= acquire_sem_etc(sync
->sem
, 1,
579 B_CAN_INTERRUPT
| (timeout
>= 0 ? B_ABSOLUTE_TIMEOUT
: 0), timeout
);
582 // deselect file descriptors
584 for (uint32 i
= 0; i
< numFDs
; i
++) {
585 if (fds
[i
].fd
>= 0 && (fds
[i
].revents
& POLLNVAL
) == 0)
586 deselect_fd(fds
[i
].fd
, sync
->set
+ i
, kernel
);
589 // collect the events that have happened in the meantime
594 for (uint32 i
= 0; i
< numFDs
; i
++) {
598 // POLLxxx flags and B_SELECT_xxx flags are compatible
599 fds
[i
].revents
= sync
->set
[i
].events
600 & sync
->set
[i
].selected_events
;
601 if (fds
[i
].revents
!= 0)
606 count
= B_INTERRUPTED
;
609 // B_TIMED_OUT, and B_WOULD_BLOCK
613 put_select_sync(sync
);
615 T(PollDone(fds
, numFDs
, count
));
622 common_wait_for_objects(object_wait_info
* infos
, int numInfos
, uint32 flags
,
623 bigtime_t timeout
, bool kernel
)
625 status_t status
= B_OK
;
627 // allocate sync object
629 status
= create_select_sync(numInfos
, sync
);
633 // start selecting objects
635 bool invalid
= false;
636 for (int i
= 0; i
< numInfos
; i
++) {
637 uint16 type
= infos
[i
].type
;
638 int32 object
= infos
[i
].object
;
640 // initialize events masks
641 sync
->set
[i
].selected_events
= infos
[i
].events
642 | B_EVENT_INVALID
| B_EVENT_ERROR
| B_EVENT_DISCONNECTED
;
643 sync
->set
[i
].events
= 0;
646 if (type
>= kSelectOpsCount
647 || kSelectOps
[type
].select(object
, sync
->set
+ i
, kernel
) != B_OK
) {
648 sync
->set
[i
].events
= B_EVENT_INVALID
;
649 infos
[i
].events
= B_EVENT_INVALID
;
650 // indicates that the object doesn't need to be deselected
656 status
= acquire_sem_etc(sync
->sem
, 1, B_CAN_INTERRUPT
| flags
,
662 for (int i
= 0; i
< numInfos
; i
++) {
663 uint16 type
= infos
[i
].type
;
665 if (type
< kSelectOpsCount
&& (infos
[i
].events
& B_EVENT_INVALID
) == 0)
666 kSelectOps
[type
].deselect(infos
[i
].object
, sync
->set
+ i
, kernel
);
669 // collect the events that have happened in the meantime
672 if (status
== B_OK
) {
673 for (int i
= 0; i
< numInfos
; i
++) {
674 infos
[i
].events
= sync
->set
[i
].events
675 & sync
->set
[i
].selected_events
;
676 if (infos
[i
].events
!= 0)
680 // B_INTERRUPTED, B_TIMED_OUT, and B_WOULD_BLOCK
684 put_select_sync(sync
);
690 // #pragma mark - kernel private
694 notify_select_events(select_info
* info
, uint16 events
)
696 FUNCTION(("notify_select_events(%p (%p), 0x%x)\n", info
, info
->sync
,
700 || info
->sync
== NULL
701 || info
->sync
->sem
< B_OK
)
704 atomic_or(&info
->events
, events
);
706 // only wake up the waiting select()/poll() call if the events
707 // match one of the selected ones
708 if (info
->selected_events
& events
)
709 return release_sem_etc(info
->sync
->sem
, 1, B_DO_NOT_RESCHEDULE
);
716 notify_select_events_list(select_info
* list
, uint16 events
)
718 struct select_info
* info
= list
;
719 while (info
!= NULL
) {
720 notify_select_events(info
, events
);
726 // #pragma mark - public kernel API
730 notify_select_event(struct selectsync
*sync
, uint8 event
)
732 return notify_select_events((select_info
*)sync
, SELECT_FLAG(event
));
736 // #pragma mark - private kernel exported API
739 static select_sync_pool_entry
*
740 find_select_sync_pool_entry(select_sync_pool
*pool
, selectsync
*sync
)
742 for (SelectSyncPoolEntryList::Iterator it
= pool
->entries
.GetIterator();
744 select_sync_pool_entry
*entry
= it
.Next();
745 if (entry
->sync
== sync
)
754 add_select_sync_pool_entry(select_sync_pool
*pool
, selectsync
*sync
,
757 // check, whether the entry does already exist
758 select_sync_pool_entry
*entry
= find_select_sync_pool_entry(pool
, sync
);
760 entry
= new (std::nothrow
) select_sync_pool_entry
;
767 pool
->entries
.Add(entry
);
770 entry
->events
|= SELECT_FLAG(event
);
777 add_select_sync_pool_entry(select_sync_pool
**_pool
, selectsync
*sync
,
780 // create the pool, if necessary
781 select_sync_pool
*pool
= *_pool
;
783 pool
= new (std::nothrow
) select_sync_pool
;
791 status_t error
= add_select_sync_pool_entry(pool
, sync
, event
);
794 if (pool
->entries
.IsEmpty()) {
804 remove_select_sync_pool_entry(select_sync_pool
**_pool
, selectsync
*sync
,
807 select_sync_pool
*pool
= *_pool
;
809 return B_ENTRY_NOT_FOUND
;
811 // clear the event flag of the concerned entries
813 for (SelectSyncPoolEntryList::Iterator it
= pool
->entries
.GetIterator();
815 select_sync_pool_entry
*entry
= it
.Next();
816 if (entry
->sync
== sync
) {
818 entry
->events
&= ~SELECT_FLAG(event
);
820 // remove the entry, if no longer needed
821 if (entry
->events
== 0) {
829 return B_ENTRY_NOT_FOUND
;
831 // delete the pool, if no longer needed
832 if (pool
->entries
.IsEmpty()) {
842 delete_select_sync_pool(select_sync_pool
*pool
)
847 while (select_sync_pool_entry
*entry
= pool
->entries
.Head()) {
848 pool
->entries
.Remove(entry
);
857 notify_select_event_pool(select_sync_pool
*pool
, uint8 event
)
862 FUNCTION(("notify_select_event_pool(%p, %u)\n", pool
, event
));
864 for (SelectSyncPoolEntryList::Iterator it
= pool
->entries
.GetIterator();
866 select_sync_pool_entry
*entry
= it
.Next();
867 if (entry
->events
& SELECT_FLAG(event
))
868 notify_select_event(entry
->sync
, event
);
873 // #pragma mark - Kernel POSIX layer
877 _kern_select(int numFDs
, fd_set
*readSet
, fd_set
*writeSet
, fd_set
*errorSet
,
878 bigtime_t timeout
, const sigset_t
*sigMask
)
881 timeout
+= system_time();
883 return common_select(numFDs
, readSet
, writeSet
, errorSet
, timeout
,
889 _kern_poll(struct pollfd
*fds
, int numFDs
, bigtime_t timeout
)
892 timeout
+= system_time();
894 return common_poll(fds
, numFDs
, timeout
, true);
899 _kern_wait_for_objects(object_wait_info
* infos
, int numInfos
, uint32 flags
,
902 return common_wait_for_objects(infos
, numInfos
, flags
, timeout
, true);
906 // #pragma mark - User syscalls
910 _user_select(int numFDs
, fd_set
*userReadSet
, fd_set
*userWriteSet
,
911 fd_set
*userErrorSet
, bigtime_t timeout
, const sigset_t
*userSigMask
)
913 fd_set
*readSet
= NULL
, *writeSet
= NULL
, *errorSet
= NULL
;
914 uint32 bytes
= _howmany(numFDs
, NFDBITS
) * sizeof(fd_mask
);
918 syscall_restart_handle_timeout_pre(timeout
);
923 if ((userReadSet
!= NULL
&& !IS_USER_ADDRESS(userReadSet
))
924 || (userWriteSet
!= NULL
&& !IS_USER_ADDRESS(userWriteSet
))
925 || (userErrorSet
!= NULL
&& !IS_USER_ADDRESS(userErrorSet
))
926 || (userSigMask
!= NULL
&& !IS_USER_ADDRESS(userSigMask
)))
927 return B_BAD_ADDRESS
;
931 if (userReadSet
!= NULL
) {
932 readSet
= (fd_set
*)malloc(bytes
);
936 if (user_memcpy(readSet
, userReadSet
, bytes
) < B_OK
) {
937 result
= B_BAD_ADDRESS
;
942 if (userWriteSet
!= NULL
) {
943 writeSet
= (fd_set
*)malloc(bytes
);
944 if (writeSet
== NULL
) {
945 result
= B_NO_MEMORY
;
948 if (user_memcpy(writeSet
, userWriteSet
, bytes
) < B_OK
) {
949 result
= B_BAD_ADDRESS
;
954 if (userErrorSet
!= NULL
) {
955 errorSet
= (fd_set
*)malloc(bytes
);
956 if (errorSet
== NULL
) {
957 result
= B_NO_MEMORY
;
960 if (user_memcpy(errorSet
, userErrorSet
, bytes
) < B_OK
) {
961 result
= B_BAD_ADDRESS
;
966 if (userSigMask
!= NULL
)
967 sigMask
= *userSigMask
;
969 result
= common_select(numFDs
, readSet
, writeSet
, errorSet
, timeout
,
970 userSigMask
? &sigMask
: NULL
, false);
976 && user_memcpy(userReadSet
, readSet
, bytes
) < B_OK
)
978 && user_memcpy(userWriteSet
, writeSet
, bytes
) < B_OK
)
980 && user_memcpy(userErrorSet
, errorSet
, bytes
) < B_OK
))) {
981 result
= B_BAD_ADDRESS
;
983 syscall_restart_handle_timeout_post(result
, timeout
);
995 _user_poll(struct pollfd
*userfds
, int numFDs
, bigtime_t timeout
)
1001 syscall_restart_handle_timeout_pre(timeout
);
1007 // special case: no FDs
1008 result
= common_poll(NULL
, 0, timeout
, false);
1010 ? syscall_restart_handle_timeout_post(result
, timeout
) : result
;
1014 if (userfds
== NULL
|| !IS_USER_ADDRESS(userfds
))
1015 return B_BAD_ADDRESS
;
1017 fds
= (struct pollfd
*)malloc(bytes
= numFDs
* sizeof(struct pollfd
));
1021 if (user_memcpy(fds
, userfds
, bytes
) < B_OK
) {
1022 result
= B_BAD_ADDRESS
;
1026 result
= common_poll(fds
, numFDs
, timeout
, false);
1028 // copy back results
1029 if (numFDs
> 0 && user_memcpy(userfds
, fds
, bytes
) != 0) {
1031 result
= B_BAD_ADDRESS
;
1033 syscall_restart_handle_timeout_post(result
, timeout
);
1043 _user_wait_for_objects(object_wait_info
* userInfos
, int numInfos
, uint32 flags
,
1046 syscall_restart_handle_timeout_pre(flags
, timeout
);
1051 if (numInfos
== 0) {
1052 // special case: no infos
1053 ssize_t result
= common_wait_for_objects(NULL
, 0, flags
, timeout
,
1056 ? syscall_restart_handle_timeout_post(result
, timeout
) : result
;
1059 if (userInfos
== NULL
|| !IS_USER_ADDRESS(userInfos
))
1060 return B_BAD_ADDRESS
;
1062 int bytes
= sizeof(object_wait_info
) * numInfos
;
1063 object_wait_info
* infos
= (object_wait_info
*)malloc(bytes
);
1067 // copy parameters to kernel space, call the function, and copy the results
1070 if (user_memcpy(infos
, userInfos
, bytes
) == B_OK
) {
1071 result
= common_wait_for_objects(infos
, numInfos
, flags
, timeout
,
1074 if (result
>= 0 && user_memcpy(userInfos
, infos
, bytes
) != B_OK
) {
1075 result
= B_BAD_ADDRESS
;
1077 syscall_restart_handle_timeout_post(result
, timeout
);
1079 result
= B_BAD_ADDRESS
;