usr/src/lib/libc/port/threads/scalls.c

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2015, Joyent, Inc. All rights reserved.
 */

/* Copyright (c) 2013, OmniTI Computer Consulting, Inc. All rights reserved. */

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <wait.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/file.h>
#include <sys/door.h>

#pragma weak lockf64 = lockf
#pragma weak openat64 = openat
#pragma weak open64 = open
#pragma weak creat64 = creat
#pragma weak pread64 = pread
#pragma weak preadv64 = preadv
#pragma weak pwrite64 = pwrite
#pragma weak pwritev64 = pwritev

/*
 * atfork_lock protects the pthread_atfork() data structures.
 *
 * fork_lock does double-duty.  Not only does it (and atfork_lock)
 * serialize calls to fork() and forkall(), but it also serializes calls
 * to thr_suspend() and thr_continue() (because fork() and forkall() also
 * suspend and continue other threads and they want no competition).
 *
 * Functions called in dlopen()ed L10N objects can do anything, including
 * call malloc() and free().  Such calls are not fork-safe when protected
 * by an ordinary mutex that is acquired in libc's prefork processing
 * because, with an interposed malloc library present, there would be a
 * lock ordering violation due to the pthread_atfork() prefork function
 * in the interposition library acquiring its malloc lock(s) before the
 * ordinary mutex in libc being acquired by libc's prefork functions.
 *
 * Within libc, calls to malloc() and free() are fork-safe if the calls
 * are made while holding no other libc locks.  This covers almost all
 * of libc's malloc() and free() calls.  For those libc code paths, such
 * as the above-mentioned L10N calls, that require serialization and that
 * may call malloc() or free(), libc uses callout_lock_enter() to perform
 * the serialization.  This works because callout_lock is not acquired as
 * part of running the pthread_atfork() prefork handlers (to avoid the
 * lock ordering violation described above).  Rather, it is simply
 * reinitialized in postfork1_child() to cover the case that some
 * now-defunct thread might have been suspended while holding it.
 */
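
/*
 * Illustrative sketch (not part of this file): the application-level
 * pattern that creates the lock-ordering hazard described above.  An
 * interposing malloc library typically registers pthread_atfork()
 * handlers so that its own lock is held consistently across fork();
 * the names malloc_interpose_lock, prepare, parent and child are
 * placeholders for exposition only:
 *
 *	static mutex_t malloc_interpose_lock = DEFAULTMUTEX;
 *
 *	static void prepare(void) { (void) mutex_lock(&malloc_interpose_lock); }
 *	static void parent(void) { (void) mutex_unlock(&malloc_interpose_lock); }
 *	static void child(void) { (void) mutex_unlock(&malloc_interpose_lock); }
 *
 *	(void) pthread_atfork(prepare, parent, child);
 *
 * Because such a prepare handler runs during prefork processing, libc
 * must not also acquire an ordinary mutex there that malloc()/free()
 * callers might already hold in the opposite order.
 */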

void
fork_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_lock(&curthread->ul_uberdata->fork_lock);
}

void
fork_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_unlock(&curthread->ul_uberdata->fork_lock);
}

/*
 * Use cancel_safe_mutex_lock() to protect against being cancelled while
 * holding callout_lock and calling outside of libc (via L10N plugins).
 * We will honor a pending cancellation request when callout_lock_exit()
 * is called, by calling cancel_safe_mutex_unlock().
 */
void
callout_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_lock(&curthread->ul_uberdata->callout_lock);
}

void
callout_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_unlock(&curthread->ul_uberdata->callout_lock);
}
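
/*
 * Illustrative usage (hypothetical caller, for exposition only): a libc
 * code path that must serialize a call into a dlopen()ed L10N plugin,
 * and that may therefore call malloc() or free(), brackets the call
 * with the pair above instead of an ordinary lmutex_lock()-acquired
 * lock:
 *
 *	callout_lock_enter();
 *	(*plugin_func)(arg);		(may call malloc() and free())
 *	callout_lock_exit();		(honors any pending cancellation)
 *
 * plugin_func and arg are placeholders, not identifiers from libc.
 */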

pid_t
forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) mutex_lock(&udp->atfork_lock);

	/*
	 * Posix (SUSv3) requires fork() to be async-signal-safe.
	 * This cannot be made to happen with fork handlers in place
	 * (they grab locks).  To be in nominal compliance, don't run
	 * any fork handlers if we are called within a signal context.
	 * This leaves the child process in a questionable state with
	 * respect to its locks, but at least the parent process does
	 * not become deadlocked due to the calling thread attempting
	 * to acquire a lock that it already owns.
	 */
	if (self->ul_siglink == NULL)
		_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) mutex_lock(&udp->fork_lock);

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		if (self->ul_siglink == NULL)
			_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		if (self->ul_siglink == NULL)
			_postfork_parent_handler();
	}
	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = fork
pid_t
fork(void)
{
	return (forkx(0));
}

/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
pid_t
forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;
	(void) mutex_lock(&udp->atfork_lock);
	(void) mutex_lock(&udp->fork_lock);
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		unregister_locks();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	(void) mutex_unlock(&udp->fork_lock);
	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

pid_t
forkall(void)
{
	return (forkallx(0));
}

/*
 * For the implementation of cancellation at cancellation points.
 */
#define	PROLOGUE	\
{	\
	ulwp_t *self = curthread;	\
	int nocancel =	\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);	\
	int abort = 0;	\
	if (nocancel == 0) {	\
		self->ul_save_async = self->ul_cancel_async;	\
		if (!self->ul_cancel_disabled) {	\
			self->ul_cancel_async = 1;	\
			if (self->ul_cancel_pending)	\
				pthread_exit(PTHREAD_CANCELED);	\
		}	\
		self->ul_sp = stkptr();	\
	} else if (self->ul_cancel_pending &&	\
	    !self->ul_cancel_disabled) {	\
		set_cancel_eintr_flag(self);	\
		abort = 1;	\
	}

#define	EPILOGUE	\
	if (nocancel == 0) {	\
		self->ul_sp = 0;	\
		self->ul_cancel_async = self->ul_save_async;	\
	}	\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 */
#define	PERFORM(function_call)	\
	PROLOGUE	\
	if (abort) {	\
		*self->ul_errnop = EINTR;	\
		return (-1);	\
	}	\
	if (nocancel)	\
		return (function_call);	\
	rv = function_call;	\
	EPILOGUE	\
	return (rv);
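
/*
 * Illustrative sketch (hypothetical, not part of libc): a new
 * cancellation-point wrapper is written the same way as read() and
 * write() below.  The local variable 'rv' must be declared because
 * PERFORM() expands to code that assigns the result of the call to it
 * before running EPILOGUE; __mysyscall is a placeholder name:
 *
 *	ssize_t
 *	mysyscall(int fd, void *buf, size_t len)
 *	{
 *		extern ssize_t __mysyscall(int, void *, size_t);
 *		ssize_t rv;
 *
 *		PERFORM(__mysyscall(fd, buf, len))
 *	}
 */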

/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)	\
{	\
	ulwp_t *self = curthread;	\
	int nocancel =	\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);	\
	if (!self->ul_vfork) {	\
		if (sigmask) {	\
			block_all_signals(self);	\
			self->ul_tmpmask = *sigmask;	\
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;	\
		}	\
		if (nocancel == 0) {	\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {	\
				self->ul_cancel_async = 1;	\
				if (self->ul_cancel_pending) {	\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}	\
					pthread_exit(PTHREAD_CANCELED);	\
				}	\
			}	\
			self->ul_sp = stkptr();	\
		}	\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */
#define	EPILOGUE_MASK	\
	if (nocancel == 0) {	\
		self->ul_sp = 0;	\
		self->ul_cancel_async = self->ul_save_async;	\
	}	\
	if (self->ul_sigsuspend) {	\
		self->ul_sigsuspend = 0;	\
		restore_signals(self);	\
	}	\
}

/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue =
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |
	    self->ul_critical | self->ul_sigdefer) != 0;
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	} else if (self->ul_cancel_pending &&
	    !self->ul_cancel_disabled) {
		set_cancel_eintr_flag(self);
	}
}

void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}

/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	if (abort)
		return (EINTR);
	while ((error = __lwp_wait(tid, found)) == EINTR && !cancel_active())
		continue;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t __read(int, void *, size_t);
	ssize_t rv;

	PERFORM(__read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t __write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(__write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int __getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(__getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int __getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(__getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

#pragma weak __xpg4_putmsg = putmsg
#pragma weak __xpg4_putpmsg = putpmsg

int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

int
nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = abort? EINTR : __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

int
clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = abort? EINTR : __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}

unsigned int
sleep(unsigned int sec)
{
	unsigned int rem = 0;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	if (nanosleep(&ts, &tsr) == -1 && errno == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}
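
/*
 * Worked example of the rounding above (for exposition only): if
 * sleep(10) is interrupted with 2.6 seconds still to go, nanosleep()
 * reports tv_sec = 2 and tv_nsec = 600000000; since tv_nsec is at
 * least half a second, sleep() returns 3 rather than 2.
 */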

int
usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	(void) nanosleep(&ts, NULL);
	return (0);
}

int
close(int fildes)
{
	extern void _aio_close(int);
	extern int __close(int);
	int rv;

	/*
	 * If we call _aio_close() while in a critical region,
	 * we will draw an ASSERT() failure, so don't do it.
	 * No calls to close() from within libc need _aio_close();
	 * only the application's calls to close() need this,
	 * and such calls are never from a libc critical region.
	 */
	if (curthread->ul_critical == 0)
		_aio_close(fildes);
	PERFORM(__close(fildes))
}

int
door_call(int d, door_arg_t *params)
{
	extern int __door_call(int, door_arg_t *);
	int rv;

	PERFORM(__door_call(d, params))
}

int
fcntl(int fildes, int cmd, ...)
{
	extern int __fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (__fcntl(fildes, cmd, arg));
	PERFORM(__fcntl(fildes, cmd, arg))
}

int
fdatasync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, O_DSYNC))
}

int
fsync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, O_SYNC))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int __lockf(int, int, off_t);
	int rv;

	PERFORM(__lockf(fildes, function, size))
}

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t __msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(__msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int __msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(__msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(void *addr, size_t len, int flags)
{
	extern int __msync(void *, size_t, int);
	int rv;

	PERFORM(__msync(addr, len, flags))
}

int
openat(int fd, const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat(fd, path, oflag, mode))
}

int
open(const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open(path, oflag, mode))
}

int
creat(const char *path, mode_t mode)
{
	return (open(path, O_WRONLY | O_CREAT | O_TRUNC, mode));
}

int
pause(void)
{
	extern int __pause(void);
	int rv;

	PERFORM(__pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pread(fildes, buf, nbyte, offset))
}

ssize_t
preadv(int fildes, const struct iovec *iov, int iovcnt, off_t offset)
{
	extern ssize_t __preadv(int, const struct iovec *, int, off_t, off_t);
	ssize_t rv;

	PERFORM(__preadv(fildes, iov, iovcnt, offset, 0))
}

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pwrite(fildes, buf, nbyte, offset))
}

ssize_t
pwritev(int fildes, const struct iovec *iov, int iovcnt, off_t offset)
{
	extern ssize_t __pwritev(int, const struct iovec *, int, off_t, off_t);
	ssize_t rv;

	PERFORM(__pwritev(fildes, iov, iovcnt, offset, 0))
}

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int __sigpause(int);
	int rv;

	PERFORM(__sigpause(sig))
}

int
sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

int
sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	if (abort) {
		*self->ul_errnop = EINTR;
		sig = -1;
	} else {
		sig = __sigtimedwait(set, &info, timeout);
		if (sig == SIGCANCEL &&
		    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
			do_sigcancel();
			*self->ul_errnop = EINTR;
			sig = -1;
		}
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

#pragma weak __posix_sigwait = sigwait

int
sigwait(const sigset_t *set, int *sig)
{
	if ((*sig = sigtimedwait(set, NULL, NULL)) != -1)
		return (0);
	if (errno == 0)
		errno = EINVAL;
	return (errno);
}
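
/*
 * Illustrative caller (for exposition only, not part of libc): unlike
 * most system call wrappers, sigwait() returns 0 on success with *sig
 * set, or an error number directly on failure; it does not return -1
 * and set errno:
 *
 *	sigset_t set;
 *	int sig, err;
 *
 *	(void) sigemptyset(&set);
 *	(void) sigaddset(&set, SIGUSR1);
 *	if ((err = sigwait(&set, &sig)) == 0)
 *		... dispatch on sig ...
 *	else
 *		... err is the error number, errno-style ...
 */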

int
sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (sigtimedwait(set, info, NULL));
}

int
sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}

int
_so_accept(int sock, struct sockaddr *addr, uint_t *addrlen, int version,
	int flags)
{
	extern int __so_accept(int, struct sockaddr *, uint_t *, int, int);
	int rv;

	PERFORM(__so_accept(sock, addr, addrlen, version, flags))
}

int
_so_connect(int sock, struct sockaddr *addr, uint_t addrlen, int version)
{
	extern int __so_connect(int, struct sockaddr *, uint_t, int);
	int rv;

	PERFORM(__so_connect(sock, addr, addrlen, version))
}

int
_so_recv(int sock, void *buf, size_t len, int flags)
{
	extern int __so_recv(int, void *, size_t, int);
	int rv;

	PERFORM(__so_recv(sock, buf, len, flags))
}

int
_so_recvfrom(int sock, void *buf, size_t len, int flags,
	struct sockaddr *addr, int *addrlen)
{
	extern int __so_recvfrom(int, void *, size_t, int,
	    struct sockaddr *, int *);
	int rv;

	PERFORM(__so_recvfrom(sock, buf, len, flags, addr, addrlen))
}

int
_so_recvmsg(int sock, struct msghdr *msg, int flags)
{
	extern int __so_recvmsg(int, struct msghdr *, int);
	int rv;

	PERFORM(__so_recvmsg(sock, msg, flags))
}

int
_so_send(int sock, const void *buf, size_t len, int flags)
{
	extern int __so_send(int, const void *, size_t, int);
	int rv;

	PERFORM(__so_send(sock, buf, len, flags))
}

int
_so_sendmsg(int sock, const struct msghdr *msg, int flags)
{
	extern int __so_sendmsg(int, const struct msghdr *, int);
	int rv;

	PERFORM(__so_sendmsg(sock, msg, flags))
}

int
_so_sendto(int sock, const void *buf, size_t len, int flags,
	const struct sockaddr *addr, int *addrlen)
{
	extern int __so_sendto(int, const void *, size_t, int,
	    const struct sockaddr *, int *);
	int rv;

	PERFORM(__so_sendto(sock, buf, len, flags, addr, addrlen))
}

int
tcdrain(int fildes)
{
	extern int __tcdrain(int);
	int rv;

	PERFORM(__tcdrain(fildes))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int __waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	if (options & WNOHANG)
		return (__waitid(idtype, id, infop, options));
	PERFORM(__waitid(idtype, id, infop, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__writev(fildes, iov, iovcnt))
}