 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2015, Joyent, Inc. All rights reserved.
 */

/* Copyright (c) 2013, OmniTI Computer Consulting, Inc. All rights reserved. */
#include "thr_uberdata.h"
#include <sys/socket.h>
 * atfork_lock protects the pthread_atfork() data structures.
 *
 * fork_lock does double-duty.  Not only does it (and atfork_lock)
 * serialize calls to fork() and forkall(), but it also serializes calls
 * to thr_suspend() and thr_continue() (because fork() and forkall() also
 * suspend and continue other threads and they want no competition).
 *
 * Functions called in dlopen()ed L10N objects can do anything, including
 * call malloc() and free().  Such calls are not fork-safe when protected
 * by an ordinary mutex that is acquired in libc's prefork processing
 * because, with an interposed malloc library present, there would be a
 * lock ordering violation due to the pthread_atfork() prefork function
 * in the interposition library acquiring its malloc lock(s) before the
 * ordinary mutex in libc being acquired by libc's prefork functions.
 *
 * Within libc, calls to malloc() and free() are fork-safe if the calls
 * are made while holding no other libc locks.  This covers almost all
 * of libc's malloc() and free() calls.  For those libc code paths, such
 * as the above-mentioned L10N calls, that require serialization and that
 * may call malloc() or free(), libc uses callout_lock_enter() to perform
 * the serialization.  This works because callout_lock is not acquired as
 * part of running the pthread_atfork() prefork handlers (to avoid the
 * lock ordering violation described above).  Rather, it is simply
 * reinitialized in postfork1_child() to cover the case that some
 * now-defunct thread might have been suspended while holding it.
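 *
 * As an illustrative sketch (the callout name below is hypothetical,
 * not an actual libc symbol), a libc path that must call into a
 * dlopen()ed L10N object that may call malloc() or free() would be
 * serialized this way rather than with an lmutex_lock()-acquired lock:
 *
 *	callout_lock_enter();
 *	msg = (*l10n_callout)(key);
 *	callout_lock_exit();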
ASSERT(curthread->ul_critical == 0);
(void) mutex_lock(&curthread->ul_uberdata->fork_lock);

ASSERT(curthread->ul_critical == 0);
(void) mutex_unlock(&curthread->ul_uberdata->fork_lock);
 * Use cancel_safe_mutex_lock() to protect against being cancelled while
 * holding callout_lock and calling outside of libc (via L10N plugins).
 * We will honor a pending cancellation request when callout_lock_exit()
 * is called, by calling cancel_safe_mutex_unlock().

callout_lock_enter(void)
ASSERT(curthread->ul_critical == 0);
cancel_safe_mutex_lock(&curthread->ul_uberdata->callout_lock);

callout_lock_exit(void)
ASSERT(curthread->ul_critical == 0);
cancel_safe_mutex_unlock(&curthread->ul_uberdata->callout_lock);
ulwp_t *self = curthread;
uberdata_t *udp = self->ul_uberdata;

if (self->ul_vfork) {
 * We are a child of vfork(); omit all of the fork
 * logic and go straight to the system call trap.
 * A vfork() child of a multithreaded parent
 * must never call fork().
if (udp->uberflags.uf_mt) {
pid = __forkx(flags);
if (pid == 0) {		/* child */
 * Cannot call fork() from a fork handler.

 * The functions registered by pthread_atfork() are defined by
 * the application and its libraries and we must not hold any
 * internal lmutex_lock()-acquired locks while invoking them.
 * We hold only udp->atfork_lock to protect the atfork linkages.
 * If one of these pthread_atfork() functions attempts to fork
 * or to call pthread_atfork(), libc will detect the error and
 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
 * functions are free to do anything they please (except they
 * will not receive any signals).
(void) mutex_lock(&udp->atfork_lock);

 * Posix (SUSv3) requires fork() to be async-signal-safe.
 * This cannot be made to happen with fork handlers in place
 * (they grab locks).  To be in nominal compliance, don't run
 * any fork handlers if we are called within a signal context.
 * This leaves the child process in a questionable state with
 * respect to its locks, but at least the parent process does
 * not become deadlocked due to the calling thread attempting
 * to acquire a lock that it already owns.
if (self->ul_siglink == NULL)
 * Block every other thread attempting thr_suspend() or thr_continue().
(void) mutex_lock(&udp->fork_lock);
 * Just deferring signals via sigoff() is not enough.
 * We have to avoid taking a deferred signal in the child
 * that was actually sent to the parent before __forkx().
block_all_signals(self);
 * This suspends all threads but this one, leaving them
 * suspended outside of any critical regions in the library.
 * Thus, we are assured that no lmutex_lock()-acquired library
 * locks are held while we invoke fork() from the current thread.

pid = __forkx(flags);
if (pid == 0) {		/* child */
 * Clear our schedctl pointer.
 * Discard any deferred signal that was sent to the parent.
 * Because we blocked all signals before __forkx(), a
 * deferred signal cannot have been taken by the child.
self->ul_schedctl_called = NULL;
self->ul_schedctl = NULL;
self->ul_siginfo.si_signo = 0;
/* reset the library's data structures to reflect one thread */
restore_signals(self);
(void) mutex_unlock(&udp->fork_lock);
if (self->ul_siglink == NULL)
	_postfork_child_handler();

/* restart all threads that were suspended for fork() */
restore_signals(self);
(void) mutex_unlock(&udp->fork_lock);
if (self->ul_siglink == NULL)
	_postfork_parent_handler();

(void) mutex_unlock(&udp->atfork_lock);
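
/*
 * Illustrative sketch (application-side code, not part of libc): the
 * handlers referred to above are registered with pthread_atfork(), for
 * example by a library that wants its own lock to be consistent in both
 * the parent and the child across fork():
 *
 *	static pthread_mutex_t mylib_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void mylib_prepare(void) { (void) pthread_mutex_lock(&mylib_lock); }
 *	static void mylib_parent(void) { (void) pthread_mutex_unlock(&mylib_lock); }
 *	static void mylib_child(void) { (void) pthread_mutex_unlock(&mylib_lock); }
 *
 *	(void) pthread_atfork(mylib_prepare, mylib_parent, mylib_child);
 *
 * If such a handler itself calls fork() or pthread_atfork(), libc fails
 * that call with EDEADLK as described above.
 */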
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
#pragma weak fork1 = fork

 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
ulwp_t *self = curthread;
uberdata_t *udp = self->ul_uberdata;

if (self->ul_vfork) {
if (udp->uberflags.uf_mt) {
pid = __forkallx(flags);
if (pid == 0) {		/* child */

(void) mutex_lock(&udp->atfork_lock);
(void) mutex_lock(&udp->fork_lock);
block_all_signals(self);

pid = __forkallx(flags);
self->ul_schedctl_called = NULL;
self->ul_schedctl = NULL;
self->ul_siginfo.si_signo = 0;

restore_signals(self);
(void) mutex_unlock(&udp->fork_lock);
(void) mutex_unlock(&udp->atfork_lock);

return (forkallx(0));
 * For the implementation of cancellation at cancellation points.

ulwp_t *self = curthread; \
(self->ul_vfork | self->ul_nocancel | self->ul_libc_locks | \
    self->ul_critical | self->ul_sigdefer); \
if (nocancel == 0) { \
self->ul_save_async = self->ul_cancel_async; \
if (!self->ul_cancel_disabled) { \
self->ul_cancel_async = 1; \
if (self->ul_cancel_pending) \
	pthread_exit(PTHREAD_CANCELED); \
self->ul_sp = stkptr(); \
} else if (self->ul_cancel_pending && \
    !self->ul_cancel_disabled) { \
set_cancel_eintr_flag(self); \

if (nocancel == 0) { \
self->ul_cancel_async = self->ul_save_async; \
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed as a tail call, which saves
 * a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
#define	PERFORM(function_call) \
*self->ul_errnop = EINTR; \
return (function_call); \
rv = function_call; \
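
/*
 * Rough sketch (simplified; parts of the macros are not shown above) of
 * how PERFORM(__read(fd, buf, size)) in read() below behaves:
 *
 *	PROLOGUE			(set up cancellation state)
 *	if (abort) {			(deferred-cancel case)
 *		*self->ul_errnop = EINTR;
 *		return (-1);
 *	}
 *	if (nocancel)			(not acting as a cancellation point)
 *		return (__read(fd, buf, size));
 *	rv = __read(fd, buf, size);
 *	EPILOGUE			(restore cancellation state)
 *	return (rv);
 */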
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
#define	PROLOGUE_MASK(sigmask) \
ulwp_t *self = curthread; \
(self->ul_vfork | self->ul_nocancel | self->ul_libc_locks | \
    self->ul_critical | self->ul_sigdefer); \
if (!self->ul_vfork) { \
block_all_signals(self); \
self->ul_tmpmask = *sigmask; \
delete_reserved_signals(&self->ul_tmpmask); \
self->ul_sigsuspend = 1; \
if (nocancel == 0) { \
self->ul_save_async = self->ul_cancel_async; \
if (!self->ul_cancel_disabled) { \
self->ul_cancel_async = 1; \
if (self->ul_cancel_pending) { \
if (self->ul_sigsuspend) { \
self->ul_sigsuspend = 0; \
restore_signals(self); \
pthread_exit(PTHREAD_CANCELED); \
self->ul_sp = stkptr(); \
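
/*
 * For illustration (ordinary application usage, not libc internals), the
 * temporary-mask handoff described above is what makes the classic
 * race-free wait pattern work.  flag_set_by_handler is a hypothetical
 * volatile sig_atomic_t set by the SIGUSR1 handler:
 *
 *	sigset_t block, waitmask;
 *
 *	(void) sigemptyset(&block);
 *	(void) sigaddset(&block, SIGUSR1);
 *	(void) sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	(void) sigdelset(&waitmask, SIGUSR1);
 *	while (!flag_set_by_handler)
 *		(void) sigsuspend(&waitmask);
 *
 * SIGUSR1 stays blocked except while sigsuspend() has installed the
 * temporary mask, so checking the flag and going to sleep cannot race
 * with delivery of the signal.
 */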
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourselves.
#define	EPILOGUE_MASK \
if (nocancel == 0) { \
self->ul_cancel_async = self->ul_save_async; \
if (self->ul_sigsuspend) { \
self->ul_sigsuspend = 0; \
restore_signals(self); \
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.

_cancel_prologue(void)
ulwp_t *self = curthread;

self->ul_cancel_prologue =
    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |
    self->ul_critical | self->ul_sigdefer) != 0;
if (self->ul_cancel_prologue == 0) {
self->ul_save_async = self->ul_cancel_async;
if (!self->ul_cancel_disabled) {
self->ul_cancel_async = 1;
if (self->ul_cancel_pending)
	pthread_exit(PTHREAD_CANCELED);
self->ul_sp = stkptr();
} else if (self->ul_cancel_pending &&
    !self->ul_cancel_disabled) {
set_cancel_eintr_flag(self);
_cancel_epilogue(void)
ulwp_t *self = curthread;

if (self->ul_cancel_prologue == 0) {
self->ul_cancel_async = self->ul_save_async;
 * Called from _thrp_join() (thr_join() is a cancellation point)

lwp_wait(thread_t tid, thread_t *found)
while ((error = __lwp_wait(tid, found)) == EINTR && !cancel_active())
read(int fd, void *buf, size_t size)
extern ssize_t __read(int, void *, size_t);
PERFORM(__read(fd, buf, size))

write(int fd, const void *buf, size_t size)
extern ssize_t __write(int, const void *, size_t);
PERFORM(__write(fd, buf, size))
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
    int *flagsp)
extern int __getmsg(int, struct strbuf *, struct strbuf *, int *);
PERFORM(__getmsg(fd, ctlptr, dataptr, flagsp))

getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
    int *bandp, int *flagsp)
extern int __getpmsg(int, struct strbuf *, struct strbuf *,
    int *, int *);
PERFORM(__getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
putmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int flags)
extern int __putmsg(int, const struct strbuf *,
    const struct strbuf *, int);
PERFORM(__putmsg(fd, ctlptr, dataptr, flags))

__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int flags)
extern int __putmsg(int, const struct strbuf *,
    const struct strbuf *, int);
PERFORM(__putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))

putpmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int band, int flags)
extern int __putpmsg(int, const struct strbuf *,
    const struct strbuf *, int, int);
PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags))

__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int band, int flags)
extern int __putpmsg(int, const struct strbuf *,
    const struct strbuf *, int, int);
PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
error = abort ? EINTR : __nanosleep(rqtp, rmtp);
clock_nanosleep(clockid_t clock_id, int flags,
    const timespec_t *rqtp, timespec_t *rmtp)
case CLOCK_PROCESS_CPUTIME_ID:
case CLOCK_THREAD_CPUTIME_ID:
if (flags & TIMER_ABSTIME) {
abstime_to_reltime(clock_id, rqtp, &reltime);
if (clock_id == CLOCK_HIGHRES)
error = abort ? EINTR : __nanosleep(&reltime, rmtp);
if (error == 0 && clock_id == CLOCK_HIGHRES) {
 * Don't return yet if we didn't really get a timeout.
 * This can happen if we return because someone resets
 * the system clock.
if (flags & TIMER_ABSTIME) {
if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
    rqtp->tv_nsec > gethrtime()) {
abstime_to_reltime(clock_id, rqtp, &reltime);
rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
    rqtp->tv_nsec;
lapse = gethrtime() - start;
if (rqlapse > lapse) {
hrt2ts(rqlapse - lapse, &reltime);
if (error == 0 && clock_id == CLOCK_REALTIME &&
    (flags & TIMER_ABSTIME)) {
 * Don't return yet just because someone reset the
 * system clock.  Recompute the new relative time
 * and reissue the nanosleep() call if necessary.
 *
 * Resetting the system clock causes all sorts of
 * problems and the SUSV3 standards body should
 * have made the behavior of clock_nanosleep() be
 * implementation-defined in such a case rather than
 * being specific about honoring the new system time.
 * Standards bodies are filled with fools and idiots.
abstime_to_reltime(clock_id, rqtp, &reltime);
if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
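
/*
 * Worked example of the CLOCK_HIGHRES retry above, with illustrative
 * numbers only: a relative request of 10 seconds (rqlapse == 10 * NANOSEC)
 * that returns after only 4 seconds of real time (lapse == 4 * NANOSEC)
 * because the system clock was reset did not really time out, so the
 * remaining 6 seconds are converted back to a timespec with hrt2ts() and
 * the __nanosleep() call is reissued.
 */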
sleep(unsigned int sec)
unsigned int rem = 0;
ts.tv_sec = (time_t)sec;
if (nanosleep(&ts, &tsr) == -1 && errno == EINTR) {
rem = (unsigned int)tsr.tv_sec;
if (tsr.tv_nsec >= NANOSEC / 2)

usleep(useconds_t usec)
ts.tv_sec = usec / MICROSEC;
ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
(void) nanosleep(&ts, NULL);
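
/*
 * For example, usleep(2500000) builds ts = { 2, 500000000 } for
 * nanosleep(), and a sleep(10) that is interrupted with 3.6 seconds
 * remaining rounds the leftover nanoseconds to the nearest whole
 * second and so reports a remainder of 4.
 */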
extern void _aio_close(int);
extern int __close(int);

 * If we call _aio_close() while in a critical region,
 * we will draw an ASSERT() failure, so don't do it.
 * No calls to close() from within libc need _aio_close();
 * only the application's calls to close() need this,
 * and such calls are never from a libc critical region.
if (curthread->ul_critical == 0)
	_aio_close(fildes);
PERFORM(__close(fildes))
door_call(int d, door_arg_t *params)
extern int __door_call(int, door_arg_t *);
PERFORM(__door_call(d, params))

fcntl(int fildes, int cmd, ...)
extern int __fcntl(int, int, ...);
arg = va_arg(ap, intptr_t);
return (__fcntl(fildes, cmd, arg));
PERFORM(__fcntl(fildes, cmd, arg))
fdatasync(int fildes)
extern int __fdsync(int, int);
PERFORM(__fdsync(fildes, O_DSYNC))

extern int __fdsync(int, int);
PERFORM(__fdsync(fildes, O_SYNC))

lockf(int fildes, int function, off_t size)
extern int __lockf(int, int, off_t);
PERFORM(__lockf(fildes, function, size))

lockf64(int fildes, int function, off64_t size)
extern int __lockf64(int, int, off64_t);
PERFORM(__lockf64(fildes, function, size))
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
extern ssize_t __msgrcv(int, void *, size_t, long, int);
PERFORM(__msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))

msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
extern int __msgsnd(int, const void *, size_t, int);
PERFORM(__msgsnd(msqid, msgp, msgsz, msgflg))

msync(void *addr, size_t len, int flags)
extern int __msync(void *, size_t, int);
PERFORM(__msync(addr, len, flags))
openat(int fd, const char *path, int oflag, ...)
mode = va_arg(ap, mode_t);
PERFORM(__openat(fd, path, oflag, mode))

open(const char *path, int oflag, ...)
mode = va_arg(ap, mode_t);
PERFORM(__open(path, oflag, mode))

creat(const char *path, mode_t mode)
return (open(path, O_WRONLY | O_CREAT | O_TRUNC, mode));

openat64(int fd, const char *path, int oflag, ...)
mode = va_arg(ap, mode_t);
PERFORM(__openat64(fd, path, oflag, mode))

open64(const char *path, int oflag, ...)
mode = va_arg(ap, mode_t);
PERFORM(__open64(path, oflag, mode))

creat64(const char *path, mode_t mode)
return (open64(path, O_WRONLY | O_CREAT | O_TRUNC, mode));
extern int __pause(void);

pread(int fildes, void *buf, size_t nbyte, off_t offset)
extern ssize_t __pread(int, void *, size_t, off_t);
PERFORM(__pread(fildes, buf, nbyte, offset))

pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
extern ssize_t __pread64(int, void *, size_t, off64_t);
PERFORM(__pread64(fildes, buf, nbyte, offset))
preadv64(int fildes, const struct iovec *iov, int iovcnt, off64_t offset)
extern ssize_t __preadv64(int, const struct iovec *, int, off_t, off_t);
PERFORM(__preadv64(fildes, iov, iovcnt, offset & 0xffffffffULL,
    offset >> 32))
preadv(int fildes, const struct iovec *iov, int iovcnt, off_t offset)
extern ssize_t __preadv(int, const struct iovec *, int, off_t, off_t);
PERFORM(__preadv(fildes, iov, iovcnt, offset, 0))

pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
extern ssize_t __pwrite(int, const void *, size_t, off_t);
PERFORM(__pwrite(fildes, buf, nbyte, offset))

pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
extern ssize_t __pwrite64(int, const void *, size_t, off64_t);
PERFORM(__pwrite64(fildes, buf, nbyte, offset))
pwritev64(int fildes, const struct iovec *iov, int iovcnt, off64_t offset)
extern ssize_t __pwritev64(int, const struct iovec *, int, off_t, off_t);
PERFORM(__pwritev64(fildes, iov, iovcnt, offset &
    0xffffffffULL, offset >> 32))

pwritev(int fildes, const struct iovec *iov, int iovcnt, off_t offset)
extern ssize_t __pwritev(int, const struct iovec *, int, off_t, off_t);
PERFORM(__pwritev(fildes, iov, iovcnt, offset, 0))
readv(int fildes, const struct iovec *iov, int iovcnt)
extern ssize_t __readv(int, const struct iovec *, int);
PERFORM(__readv(fildes, iov, iovcnt))

extern int __sigpause(int);
PERFORM(__sigpause(sig))
sigsuspend(const sigset_t *set)
extern int __sigsuspend(const sigset_t *);
rv = __sigsuspend(set);
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
    const sigset_t *sigmask)
extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
    const sigset_t *);
PROLOGUE_MASK(sigmask)
rv = __pollsys(fds, nfd, timeout, sigmask);
sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
extern int __sigtimedwait(const sigset_t *, siginfo_t *,
    const timespec_t *);
*self->ul_errnop = EINTR;
sig = __sigtimedwait(set, &info, timeout);
if (sig == SIGCANCEL &&
    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
*self->ul_errnop = EINTR;
if (sig != -1 && infop)
	(void) memcpy(infop, &info, sizeof (*infop));
sigwait(sigset_t *set)
return (sigtimedwait(set, NULL, NULL));

sigwaitinfo(const sigset_t *set, siginfo_t *info)
return (sigtimedwait(set, info, NULL));
sigqueue(pid_t pid, int signo, const union sigval value)
extern int __sigqueue(pid_t pid, int signo,
    /* const union sigval */ void *value, int si_code, int block);
return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
_so_accept(int sock, struct sockaddr *addr, uint_t *addrlen, int version,
    int flags)
extern int __so_accept(int, struct sockaddr *, uint_t *, int, int);
PERFORM(__so_accept(sock, addr, addrlen, version, flags))
_so_connect(int sock, struct sockaddr *addr, uint_t addrlen, int version)
extern int __so_connect(int, struct sockaddr *, uint_t, int);
PERFORM(__so_connect(sock, addr, addrlen, version))

_so_recv(int sock, void *buf, size_t len, int flags)
extern int __so_recv(int, void *, size_t, int);
PERFORM(__so_recv(sock, buf, len, flags))

_so_recvfrom(int sock, void *buf, size_t len, int flags,
    struct sockaddr *addr, int *addrlen)
extern int __so_recvfrom(int, void *, size_t, int,
    struct sockaddr *, int *);
PERFORM(__so_recvfrom(sock, buf, len, flags, addr, addrlen))

_so_recvmsg(int sock, struct msghdr *msg, int flags)
extern int __so_recvmsg(int, struct msghdr *, int);
PERFORM(__so_recvmsg(sock, msg, flags))

_so_send(int sock, const void *buf, size_t len, int flags)
extern int __so_send(int, const void *, size_t, int);
PERFORM(__so_send(sock, buf, len, flags))

_so_sendmsg(int sock, const struct msghdr *msg, int flags)
extern int __so_sendmsg(int, const struct msghdr *, int);
PERFORM(__so_sendmsg(sock, msg, flags))

_so_sendto(int sock, const void *buf, size_t len, int flags,
    const struct sockaddr *addr, int *addrlen)
extern int __so_sendto(int, const void *, size_t, int,
    const struct sockaddr *, int *);
PERFORM(__so_sendto(sock, buf, len, flags, addr, addrlen))
extern int __tcdrain(int);
PERFORM(__tcdrain(fildes))

waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
extern int __waitid(idtype_t, id_t, siginfo_t *, int);
if (options & WNOHANG)
	return (__waitid(idtype, id, infop, options));
PERFORM(__waitid(idtype, id, infop, options))

writev(int fildes, const struct iovec *iov, int iovcnt)
extern ssize_t __writev(int, const struct iovec *, int);
PERFORM(__writev(fildes, iov, iovcnt))